[libcxx-commits] [clang] [flang] [libcxx] [llvm] [mlir] [AMDGPU]: Rewrite mbcnt_lo/mbcnt_hi to work item ID where applicable (PR #160496)
Teja Alaghari via libcxx-commits
libcxx-commits at lists.llvm.org
Mon Sep 29 01:01:09 PDT 2025
https://github.com/TejaX-Alaghari updated https://github.com/llvm/llvm-project/pull/160496
>From a8966cbd914a9a8fa379a532d7d4dea57cef3df5 Mon Sep 17 00:00:00 2001
From: Teja Alaghari <teja.alaghari at amd.com>
Date: Wed, 24 Sep 2025 13:57:35 +0530
Subject: [PATCH 01/35] AMDGPU: fold mbcnt_hi(~0, mbcnt_lo(~0,0)) to
llvm.amdgcn.workitem.id.x() when reqd_work_group_size proves X==wave
This adds a conservative InstCombine peephole handling the exact
pattern mbcnt.hi(~0, mbcnt.lo(~0, 0)). The transformation is applied
only when reqd_work_group_size proves that the work-group size in X
equals the target wavefront size.
Signed-off-by: Teja Alaghari <teja.alaghari at amd.com>
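For context, a minimal C++ sketch of the match this peephole performs
(illustrative only: matchesMbcntLaneId is a hypothetical helper, not part of
the patch, and the wave-size / reqd_work_group_size check is omitted):

#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;

// The IR pattern being recognized:
//   %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
//   %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
// When the work-group size in X equals the wavefront size, %hi is exactly
// the work-item id in X, so the pair can be folded to workitem.id.x().
static bool matchesMbcntLaneId(const IntrinsicInst &Hi) {
  using namespace PatternMatch;
  if (Hi.getIntrinsicID() != Intrinsic::amdgcn_mbcnt_hi)
    return false;
  const auto *Lo = dyn_cast<IntrinsicInst>(Hi.getArgOperand(1));
  return Lo && Lo->getIntrinsicID() == Intrinsic::amdgcn_mbcnt_lo &&
         match(Hi.getArgOperand(0), m_AllOnes()) &&
         match(Lo->getArgOperand(0), m_AllOnes()) &&
         match(Lo->getArgOperand(1), m_Zero());
}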
---
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 125 +++++++++++++++++-
.../AMDGPU/mbcnt-to-bitmask-neg.ll | 18 +++
.../AMDGPU/mbcnt-to-bitmask-posit.ll | 20 +++
.../AMDGPU/mbcnt-to-workitem-neg.ll | 16 +++
.../AMDGPU/mbcnt-to-workitem-posit.ll | 18 +++
5 files changed, 194 insertions(+), 3 deletions(-)
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 4fe5d00679436..509e2b019224f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
+#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNSubtarget.h"
#include "llvm/ADT/FloatingPointMode.h"
@@ -28,6 +29,10 @@ using namespace llvm::PatternMatch;
#define DEBUG_TYPE "AMDGPUtti"
+// Common wavefront sizes used in several conservative checks below.
+static constexpr unsigned WavefrontSize32 = 32u;
+static constexpr unsigned WavefrontSize64 = 64u;
+
namespace {
struct AMDGPUImageDMaskIntrinsic {
@@ -1312,9 +1317,122 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
case Intrinsic::amdgcn_mbcnt_hi: {
- // exec_hi is all 0, so this is just a copy.
- if (ST->isWave32())
+ // exec_hi is all 0, so this is just a copy on wave32.
+ if (ST && ST->isWave32())
return IC.replaceInstUsesWith(II, II.getArgOperand(1));
+
+ // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+ if (auto *HiArg1 = dyn_cast<CallInst>(II.getArgOperand(1))) {
+ Function *CalledF = HiArg1->getCalledFunction();
+ bool IsMbcntLo = false;
+ if (CalledF) {
+ // Fast-path: if this is a declared intrinsic, check the intrinsic ID.
+ if (CalledF->getIntrinsicID() == Intrinsic::amdgcn_mbcnt_lo) {
+ IsMbcntLo = true;
+ } else {
+ // Fallback: accept a declared function with the canonical name, but
+ // verify its signature to be safe: i32(i32,i32). Use the name
+ // comparison only when there's no intrinsic ID match.
+ if (CalledF->getName() == "llvm.amdgcn.mbcnt.lo") {
+ if (FunctionType *FT = CalledF->getFunctionType()) {
+ if (FT->getNumParams() == 2 &&
+ FT->getReturnType()->isIntegerTy(32) &&
+ FT->getParamType(0)->isIntegerTy(32) &&
+ FT->getParamType(1)->isIntegerTy(32))
+ IsMbcntLo = true;
+ }
+ }
+ }
+ }
+
+ if (!IsMbcntLo)
+ break;
+
+ // hi arg0 must be all-ones
+ if (auto *HiArg0C = dyn_cast<ConstantInt>(II.getArgOperand(0))) {
+ if (!HiArg0C->isAllOnesValue())
+ break;
+ } else
+ break;
+
+ // lo args: arg0 == ~0, arg1 == 0
+ Value *Lo0 = HiArg1->getArgOperand(0);
+ Value *Lo1 = HiArg1->getArgOperand(1);
+ auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
+ auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
+ if (!Lo0C || !Lo1C)
+ break;
+ if (!Lo0C->isAllOnesValue() || !Lo1C->isZero())
+ break;
+
+ // Query reqd_work_group_size via subtarget helper and compare X to wave
+ // size conservatively.
+ if (Function *F = II.getFunction()) {
+ unsigned Wave = 0;
+ if (ST && ST->isWaveSizeKnown())
+ Wave = ST->getWavefrontSize();
+
+ if (ST) {
+ if (auto MaybeX = ST->getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && (XLen == WavefrontSize32 ||
+ XLen == WavefrontSize64))
+ Wave = XLen; // allow common sizes under test harness
+
+ if (Wave != 0 && XLen == Wave) {
+ SmallVector<Type *, 0> OverloadTys;
+ CallInst *NewCall = IC.Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
+ NewCall->takeName(&II);
+ // Attach range metadata when available.
+ ST->makeLIDRangeMetadata(NewCall);
+ return IC.replaceInstUsesWith(II, NewCall);
+ }
+ // Optional: if X dimension evenly splits into wavefronts we can
+ // replace lane-id computation with a bitmask when the wave is a
+ // power-of-two. Use the Subtarget helper to conservatively decide
+ // when per-wave tiling is preserved.
+ if (ST->hasWavefrontsEvenlySplittingXDim(
+ *F, /*RequiresUniformYZ=*/true)) {
+ if (Wave != 0 && isPowerOf2_32(Wave)) {
+ // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid &
+ // mask
+ SmallVector<Type *, 0> OverloadTys;
+ CallInst *Tid = IC.Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
+ Tid->takeName(&II);
+ IntegerType *ITy = cast<IntegerType>(Tid->getType());
+ Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+ Instruction *AndInst =
+ cast<Instruction>(IC.Builder.CreateAnd(Tid, Mask));
+ AndInst->takeName(&II);
+ // Attach range metadata for the result if possible.
+ ST->makeLIDRangeMetadata(AndInst);
+ return IC.replaceInstUsesWith(II, AndInst);
+ }
+ }
+ }
+ } else {
+ // No ST: be conservative and only handle the common test harness
+ // cases where reqd_work_group_size metadata exists and equals
+ // 32/64.
+ if (auto *Node = F->getMetadata("reqd_work_group_size")) {
+ if (Node->getNumOperands() == 3) {
+ unsigned XLen = mdconst::extract<ConstantInt>(Node->getOperand(0))
+ ->getZExtValue();
+ if (XLen == WavefrontSize32 || XLen == WavefrontSize64) {
+ SmallVector<Type *, 0> OverloadTys;
+ CallInst *NewCall = IC.Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
+ NewCall->takeName(&II);
+ return IC.replaceInstUsesWith(II, NewCall);
+ }
+ }
+ }
+ }
+ }
+ }
+
break;
}
case Intrinsic::amdgcn_ballot: {
@@ -1328,7 +1446,8 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
return IC.replaceInstUsesWith(II, Constant::getNullValue(II.getType()));
}
}
- if (ST->isWave32() && II.getType()->getIntegerBitWidth() == 64) {
+ if (ST->isWave32() &&
+ II.getType()->getIntegerBitWidth() == WavefrontSize64) {
// %b64 = call i64 ballot.i64(...)
// =>
// %b32 = call i32 ballot.i32(...)
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
new file mode 100644
index 0000000000000..0313f284e5775
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
@@ -0,0 +1,18 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+; CHECK-NOT: and i32
+; CHECK-NOT: @llvm.amdgcn.workitem.id.x()
+
+; ModuleID = 'mbcnt_to_bitmask_neg'
+
+define i32 @kernel() !reqd_work_group_size !1 {
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!1 = !{i32 48, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
new file mode 100644
index 0000000000000..b87913edc8805
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -0,0 +1,20 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+; CHECK: @llvm.amdgcn.workitem.id.x()
+; CHECK-NOT: call i32 @llvm.amdgcn.mbcnt.hi
+; CHECK-NOT: call i32 @llvm.amdgcn.mbcnt.lo
+
+; ModuleID = 'mbcnt_to_bitmask_posit'
+
+define i32 @kernel() !reqd_work_group_size !1 {
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!1 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
new file mode 100644
index 0000000000000..1779b631be9f6
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
@@ -0,0 +1,16 @@
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+; CHECK: llvm.amdgcn.mbcnt.lo
+; CHECK: llvm.amdgcn.mbcnt.hi
+; CHECK-NOT: call i32 @llvm.amdgcn.workitem.id.x()
+
+define i32 @kernel() {
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
new file mode 100644
index 0000000000000..d3d8d40b8359d
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -0,0 +1,18 @@
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+; CHECK-NOT: amdgcn.mbcnt_lo
+; CHECK-NOT: amdgcn.mbcnt_hi
+; CHECK: @llvm.amdgcn.workitem.id.x()
+
+define i32 @kernel() !reqd_work_group_size !0 {
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ %b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
+ ret i32 %b
+}
+
+!0 = !{i32 64, i32 1, i32 1}
+
+; Declarations
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
+declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
>From 2a4edebb7ca20e520cf81aae4e36b3b7ccab9e5f Mon Sep 17 00:00:00 2001
From: Luke Hutton <luke.hutton at arm.com>
Date: Wed, 24 Sep 2025 09:06:35 +0100
Subject: [PATCH 02/35] [mlir][tosa] Fix validation check on controlflow
operators (#159754)
Previously the error_if check for controlflow operators would silently
fail on valid controlflow operators. This was due to incorrect return
logic in the validation function. This commit fixes that logic.
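A self-contained sketch of the logic inversion, modeled with plain bools
(hypothetical names; the real code combines mlir::LogicalResult values
returned by checkIsolatedRegion):

#include <cassert>

// The validator's convention is that the check function returns true when
// the op is conformant. The old code returned failed(then) || failed(else),
// which is true exactly when a region check fails, so valid control-flow
// ops made the checker return false and validation failed without emitting
// any diagnostic.
static bool regionsAreIsolated(bool thenOk, bool elseOk) {
  // Before (buggy): return !thenOk || !elseOk;
  return thenOk && elseOk; // conformant only if both region checks succeed
}

int main() {
  assert(regionsAreIsolated(true, true));   // valid cond_if now accepted
  assert(!regionsAreIsolated(true, false)); // non-isolated region still rejects
}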
---
.../Tosa/Transforms/TosaValidation.cpp | 8 ++---
mlir/test/Dialect/Tosa/error_if_check.mlir | 33 ------------------
.../Tosa/tosa-validation-valid-strict.mlir | 34 +++++++++++++++++++
3 files changed, 38 insertions(+), 37 deletions(-)
create mode 100644 mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir
diff --git a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
index 91fea676ac44a..e9fdcbdc15837 100644
--- a/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
+++ b/mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -1257,8 +1257,8 @@ bool checkErrorIfCondIf(Operation *op) {
// tosa.yield %arg4
// }
- return failed(checkIsolatedRegion(op, ifOp.getThenGraph(), "then")) ||
- failed(checkIsolatedRegion(op, ifOp.getElseGraph(), "else"));
+ return succeeded(checkIsolatedRegion(op, ifOp.getThenGraph(), "then")) &&
+ succeeded(checkIsolatedRegion(op, ifOp.getElseGraph(), "else"));
}
bool checkErrorIfWhileLoop(Operation *op) {
@@ -1266,8 +1266,8 @@ bool checkErrorIfWhileLoop(Operation *op) {
if (!whileOp)
return true;
- return failed(checkIsolatedRegion(op, whileOp.getCondGraph(), "cond")) ||
- failed(checkIsolatedRegion(op, whileOp.getBodyGraph(), "body"));
+ return succeeded(checkIsolatedRegion(op, whileOp.getCondGraph(), "cond")) &&
+ succeeded(checkIsolatedRegion(op, whileOp.getBodyGraph(), "body"));
}
bool checkErrorIfScatter(Operation *op) {
diff --git a/mlir/test/Dialect/Tosa/error_if_check.mlir b/mlir/test/Dialect/Tosa/error_if_check.mlir
index 290773b23193f..2f9421c43d2fb 100644
--- a/mlir/test/Dialect/Tosa/error_if_check.mlir
+++ b/mlir/test/Dialect/Tosa/error_if_check.mlir
@@ -269,20 +269,6 @@ func.func @test_cond_if_simplified_form_not_isolated_from_above(%arg0: tensor<f3
// -----
-// Check isolated cond_if's are valid
-func.func @test_cond_if_isolated_from_above(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) -> tensor<f32> {
- %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({
- ^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
- tosa.yield %arg3 : tensor<f32>
- }, {
- ^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
- tosa.yield %arg4 : tensor<f32>
- }) : (tensor<i1>, tensor<f32>, tensor<f32>) -> tensor<f32>
- return %0 : tensor<f32>
-}
-
-// -----
-
func.func @test_while_loop_cond_not_isolated_from_above(%arg0: tensor<i32>, %arg1: tensor<i32>, %arg2: tensor<f32>) {
%0 = "tosa.const"() {values = dense<0> : tensor<i32>} : () -> tensor<i32>
// expected-error at +1 {{'tosa.while_loop' op is not conformant to the TOSA specification. It requires the 'cond' region is isolated from above.}}
@@ -318,22 +304,3 @@ func.func @test_while_loop_body_not_isolated_from_above(%arg0: tensor<i32>, %arg
}) : (tensor<i32>) -> (tensor<i32>)
return
}
-
-// -----
-
-// Check isolated while_loops are valid
-func.func @test_while_loop_isolated_from_above(%arg0: tensor<f32>, %arg1: tensor<i32>) {
- %0 = "tosa.const"() {values = dense<0> : tensor<i32>} : () -> tensor<i32>
- %1:3 = "tosa.while_loop"(%0, %arg0, %arg1) ({
- ^bb0(%arg3: tensor<i32>, %arg4: tensor<f32>, %arg5: tensor<i32>):
- %2 = "tosa.greater_equal"(%arg3, %arg5) : (tensor<i32>, tensor<i32>) -> tensor<i1>
- %3 = "tosa.logical_not"(%2) : (tensor<i1>) -> tensor<i1>
- "tosa.yield"(%3) : (tensor<i1>) -> ()
- }, {
- ^bb0(%arg3: tensor<i32>, %arg4: tensor<f32>, %arg5: tensor<i32>):
- %2 = "tosa.const"() {values = dense<1> : tensor<i32>} : () -> tensor<i32>
- %3 = "tosa.add"(%arg3, %2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
- "tosa.yield"(%3, %arg4, %arg5) : (tensor<i32>, tensor<f32>, tensor<i32>) -> ()
- }) : (tensor<i32>, tensor<f32>, tensor<i32>) -> (tensor<i32>, tensor<f32>, tensor<i32>)
- return
-}
diff --git a/mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir b/mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir
new file mode 100644
index 0000000000000..f05ae7f58261d
--- /dev/null
+++ b/mlir/test/Dialect/Tosa/tosa-validation-valid-strict.mlir
@@ -0,0 +1,34 @@
+// RUN: mlir-opt %s -split-input-file -verify-diagnostics --tosa-validate="profile=pro_int,pro_fp extension=int16,int4,bf16,fp8e4m3,fp8e5m2,fft,variable,controlflow,doubleround,inexactround strict-op-spec-alignment" | FileCheck %s
+
+// -----
+
+// CHECK-LABEL: test_cond_if_isolated_from_above
+func.func @test_cond_if_isolated_from_above(%arg0: tensor<f32>, %arg1: tensor<f32>, %arg2: tensor<i1>) -> tensor<f32> {
+ %0 = "tosa.cond_if"(%arg2, %arg0, %arg1) ({
+ ^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
+ tosa.yield %arg3 : tensor<f32>
+ }, {
+ ^bb0(%arg3: tensor<f32>, %arg4: tensor<f32>):
+ tosa.yield %arg4 : tensor<f32>
+ }) : (tensor<i1>, tensor<f32>, tensor<f32>) -> tensor<f32>
+ return %0 : tensor<f32>
+}
+
+// -----
+
+// CHECK-LABEL: test_while_loop_isolated_from_above
+func.func @test_while_loop_isolated_from_above(%arg0: tensor<f32>, %arg1: tensor<i32>) {
+ %0 = "tosa.const"() {values = dense<0> : tensor<i32>} : () -> tensor<i32>
+ %1:3 = "tosa.while_loop"(%0, %arg0, %arg1) ({
+ ^bb0(%arg3: tensor<i32>, %arg4: tensor<f32>, %arg5: tensor<i32>):
+ %2 = "tosa.greater_equal"(%arg3, %arg5) : (tensor<i32>, tensor<i32>) -> tensor<i1>
+ %3 = "tosa.logical_not"(%2) : (tensor<i1>) -> tensor<i1>
+ "tosa.yield"(%3) : (tensor<i1>) -> ()
+ }, {
+ ^bb0(%arg3: tensor<i32>, %arg4: tensor<f32>, %arg5: tensor<i32>):
+ %2 = "tosa.const"() {values = dense<1> : tensor<i32>} : () -> tensor<i32>
+ %3 = "tosa.add"(%arg3, %2) : (tensor<i32>, tensor<i32>) -> tensor<i32>
+ "tosa.yield"(%3, %arg4, %arg5) : (tensor<i32>, tensor<f32>, tensor<i32>) -> ()
+ }) : (tensor<i32>, tensor<f32>, tensor<i32>) -> (tensor<i32>, tensor<f32>, tensor<i32>)
+ return
+}
>From c8f63b18398c32914e443304e61e5cf5033ca186 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulson1 at linux.ibm.com>
Date: Wed, 24 Sep 2025 10:28:35 +0200
Subject: [PATCH 03/35] [MachineScheduler] Turn SU->isScheduled check into an
assert in pickNode() (#160145)
It is unnecessary and confusing to have a do/while loop that checks
SU->isScheduled as this should never be true.
ScheduleDAGMI::updateQueues() is always called after pickNode() and it
sets isScheduled on the SU. Turn this into an assertion instead.
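As a simplified, stand-alone model of the change (not the real scheduler
code): the caller always marks the returned unit as scheduled before asking
for the next one, so the old do/while guard could only ever execute its body
once, and an assertion now documents that invariant:

#include <cassert>
#include <vector>

struct SUnit { bool isScheduled = false; };

// Previously: do { SU = pick...; } while (SU->isScheduled);
// Now the pick happens once and the impossible case is asserted.
static SUnit *pickNode(std::vector<SUnit *> &Available) {
  if (Available.empty())
    return nullptr;
  SUnit *SU = Available.back();
  Available.pop_back();
  assert(!SU->isScheduled && "SUnit scheduled twice.");
  return SU;
}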
---
llvm/lib/CodeGen/MachineScheduler.cpp | 120 +++++++++++++-------------
1 file changed, 59 insertions(+), 61 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index c6fa8f42757db..299bcc46e4bd2 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -4157,33 +4157,32 @@ SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
return nullptr;
}
SUnit *SU;
- do {
- if (RegionPolicy.OnlyTopDown) {
- SU = Top.pickOnlyChoice();
- if (!SU) {
- CandPolicy NoPolicy;
- TopCand.reset(NoPolicy);
- pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
- assert(TopCand.Reason != NoCand && "failed to find a candidate");
- tracePick(TopCand);
- SU = TopCand.SU;
- }
- IsTopNode = true;
- } else if (RegionPolicy.OnlyBottomUp) {
- SU = Bot.pickOnlyChoice();
- if (!SU) {
- CandPolicy NoPolicy;
- BotCand.reset(NoPolicy);
- pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
- assert(BotCand.Reason != NoCand && "failed to find a candidate");
- tracePick(BotCand);
- SU = BotCand.SU;
- }
- IsTopNode = false;
- } else {
- SU = pickNodeBidirectional(IsTopNode);
+ if (RegionPolicy.OnlyTopDown) {
+ SU = Top.pickOnlyChoice();
+ if (!SU) {
+ CandPolicy NoPolicy;
+ TopCand.reset(NoPolicy);
+ pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
+ assert(TopCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(TopCand);
+ SU = TopCand.SU;
}
- } while (SU->isScheduled);
+ IsTopNode = true;
+ } else if (RegionPolicy.OnlyBottomUp) {
+ SU = Bot.pickOnlyChoice();
+ if (!SU) {
+ CandPolicy NoPolicy;
+ BotCand.reset(NoPolicy);
+ pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
+ assert(BotCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(BotCand);
+ SU = BotCand.SU;
+ }
+ IsTopNode = false;
+ } else {
+ SU = pickNodeBidirectional(IsTopNode);
+ }
+ assert(!SU->isScheduled && "SUnit scheduled twice.");
// If IsTopNode, then SU is in Top.Available and must be removed. Otherwise,
// if isTopReady(), then SU is in either Top.Available or Top.Pending.
@@ -4524,43 +4523,42 @@ SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
return nullptr;
}
SUnit *SU;
- do {
- if (RegionPolicy.OnlyBottomUp) {
- SU = Bot.pickOnlyChoice();
- if (SU) {
- tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true);
- } else {
- CandPolicy NoPolicy;
- BotCand.reset(NoPolicy);
- // Set the bottom-up policy based on the state of the current bottom
- // zone and the instructions outside the zone, including the top zone.
- setPolicy(BotCand.Policy, /*IsPostRA=*/true, Bot, nullptr);
- pickNodeFromQueue(Bot, BotCand);
- assert(BotCand.Reason != NoCand && "failed to find a candidate");
- tracePick(BotCand, /*IsPostRA=*/true);
- SU = BotCand.SU;
- }
- IsTopNode = false;
- } else if (RegionPolicy.OnlyTopDown) {
- SU = Top.pickOnlyChoice();
- if (SU) {
- tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true);
- } else {
- CandPolicy NoPolicy;
- TopCand.reset(NoPolicy);
- // Set the top-down policy based on the state of the current top zone
- // and the instructions outside the zone, including the bottom zone.
- setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
- pickNodeFromQueue(Top, TopCand);
- assert(TopCand.Reason != NoCand && "failed to find a candidate");
- tracePick(TopCand, /*IsPostRA=*/true);
- SU = TopCand.SU;
- }
- IsTopNode = true;
+ if (RegionPolicy.OnlyBottomUp) {
+ SU = Bot.pickOnlyChoice();
+ if (SU) {
+ tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true);
} else {
- SU = pickNodeBidirectional(IsTopNode);
+ CandPolicy NoPolicy;
+ BotCand.reset(NoPolicy);
+ // Set the bottom-up policy based on the state of the current bottom
+ // zone and the instructions outside the zone, including the top zone.
+ setPolicy(BotCand.Policy, /*IsPostRA=*/true, Bot, nullptr);
+ pickNodeFromQueue(Bot, BotCand);
+ assert(BotCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(BotCand, /*IsPostRA=*/true);
+ SU = BotCand.SU;
}
- } while (SU->isScheduled);
+ IsTopNode = false;
+ } else if (RegionPolicy.OnlyTopDown) {
+ SU = Top.pickOnlyChoice();
+ if (SU) {
+ tracePick(Only1, /*IsTopNode=*/true, /*IsPostRA=*/true);
+ } else {
+ CandPolicy NoPolicy;
+ TopCand.reset(NoPolicy);
+ // Set the top-down policy based on the state of the current top zone
+ // and the instructions outside the zone, including the bottom zone.
+ setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
+ pickNodeFromQueue(Top, TopCand);
+ assert(TopCand.Reason != NoCand && "failed to find a candidate");
+ tracePick(TopCand, /*IsPostRA=*/true);
+ SU = TopCand.SU;
+ }
+ IsTopNode = true;
+ } else {
+ SU = pickNodeBidirectional(IsTopNode);
+ }
+ assert(!SU->isScheduled && "SUnit scheduled twice.");
if (SU->isTopReady())
Top.removeReady(SU);
>From fb8a3e5621272eca407ab360b98dedfe554ee856 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Wed, 24 Sep 2025 10:30:51 +0200
Subject: [PATCH 04/35] [clang][bytecode] Use stack offsets for This/RVO ptrs
(#160285)
Instead of keeping the `Pointer`s themselves in `InterpFrame`, just save
them as offsets and use stackRef<>() when we need them.
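A compact sketch of the resulting layout bookkeeping (simplified stand-ins
for Pointer and the frame; the actual code is in InterpFrame.cpp below): the
RVO pointer, when present, always sits at stack offset 0 and the 'this'
pointer directly after it, so storing a single offset is enough to recover
it via stackRef<Pointer>().

#include <cstddef>

struct Pointer { void *Block = nullptr; }; // stand-in for the real class

struct FrameOffsets {
  unsigned ThisPointerOffset = 0;
  void compute(bool hasRVO, bool hasThis) {
    // Mirrors: ThisPointerOffset = Func->hasRVO() ? sizeof(Pointer) : 0;
    if (hasThis)
      ThisPointerOffset = hasRVO ? sizeof(Pointer) : 0;
  }
};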
---
clang/lib/AST/ByteCode/Interp.cpp | 8 ++---
clang/lib/AST/ByteCode/Interp.h | 50 +++++++++++++-------------
clang/lib/AST/ByteCode/InterpFrame.cpp | 15 ++++----
clang/lib/AST/ByteCode/InterpFrame.h | 18 ++++++----
4 files changed, 47 insertions(+), 44 deletions(-)
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index 0f322f6ed42ac..8aaefc70e506e 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -1027,8 +1027,8 @@ static bool CheckCallDepth(InterpState &S, CodePtr OpPC) {
return true;
}
-bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This) {
- if (!This.isZero())
+bool CheckThis(InterpState &S, CodePtr OpPC) {
+ if (S.Current->hasThisPointer())
return true;
const Expr *E = S.Current->getExpr(OpPC);
@@ -1198,8 +1198,8 @@ static bool runRecordDestructor(InterpState &S, CodePtr OpPC,
const Record *R = Desc->ElemRecord;
assert(R);
- if (Pointer::pointToSameBlock(BasePtr, S.Current->getThis()) &&
- S.Current->getFunction()->isDestructor()) {
+ if (S.Current->hasThisPointer() && S.Current->getFunction()->isDestructor() &&
+ Pointer::pointToSameBlock(BasePtr, S.Current->getThis())) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_double_destroy);
return false;
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index b3b4b998439cc..3bc1a67feeba2 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -104,7 +104,7 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
/// Checks the 'this' pointer.
-bool CheckThis(InterpState &S, CodePtr OpPC, const Pointer &This);
+bool CheckThis(InterpState &S, CodePtr OpPC);
/// Checks if dynamic memory allocation is available in the current
/// language mode.
@@ -1440,9 +1440,9 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool GetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression())
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
const Pointer &Field = This.atField(I);
if (!CheckLoad(S, OpPC, Field))
return false;
@@ -1454,10 +1454,10 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression())
return false;
+ if (!CheckThis(S, OpPC))
+ return false;
const T &Value = S.Stk.pop<T>();
const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
- return false;
const Pointer &Field = This.atField(I);
if (!CheckStore(S, OpPC, Field))
return false;
@@ -1560,9 +1560,9 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0)
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
const Pointer &Field = This.atField(I);
assert(Field.canBeInitialized());
Field.deref<T>() = S.Stk.pop<T>();
@@ -1574,9 +1574,9 @@ template <PrimType Name, class T = typename PrimConv<Name>::T>
bool InitThisFieldActivate(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0)
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
const Pointer &Field = This.atField(I);
assert(Field.canBeInitialized());
Field.deref<T>() = S.Stk.pop<T>();
@@ -1593,9 +1593,9 @@ bool InitThisBitField(InterpState &S, CodePtr OpPC, const Record::Field *F,
assert(F->isBitField());
if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0)
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
const Pointer &Field = This.atField(FieldOffset);
assert(Field.canBeInitialized());
const auto &Value = S.Stk.pop<T>();
@@ -1610,9 +1610,9 @@ bool InitThisBitFieldActivate(InterpState &S, CodePtr OpPC,
assert(F->isBitField());
if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0)
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
const Pointer &Field = This.atField(FieldOffset);
assert(Field.canBeInitialized());
const auto &Value = S.Stk.pop<T>();
@@ -1750,9 +1750,9 @@ bool GetPtrFieldPop(InterpState &S, CodePtr OpPC, uint32_t Off);
inline bool GetPtrThisField(InterpState &S, CodePtr OpPC, uint32_t Off) {
if (S.checkingPotentialConstantExpression() && S.Current->getDepth() == 0)
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
S.Stk.push<Pointer>(This.atField(Off));
return true;
}
@@ -1844,9 +1844,9 @@ inline bool GetMemberPtrBasePop(InterpState &S, CodePtr OpPC, int32_t Off) {
inline bool GetPtrThisBase(InterpState &S, CodePtr OpPC, uint32_t Off) {
if (S.checkingPotentialConstantExpression())
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
S.Stk.push<Pointer>(This.atField(Off));
return true;
}
@@ -1925,10 +1925,10 @@ inline bool GetPtrThisVirtBase(InterpState &S, CodePtr OpPC,
assert(D);
if (S.checkingPotentialConstantExpression())
return false;
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
- return VirtBaseHelper(S, OpPC, D, S.Current->getThis());
+ const Pointer &This = S.Current->getThis();
+ return VirtBaseHelper(S, OpPC, D, This);
}
//===----------------------------------------------------------------------===//
@@ -1991,6 +1991,8 @@ static inline bool Activate(InterpState &S, CodePtr OpPC) {
static inline bool ActivateThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
if (S.checkingPotentialConstantExpression())
return false;
+ if (!S.Current->hasThisPointer())
+ return false;
const Pointer &Ptr = S.Current->getThis();
assert(Ptr.atField(I).canBeInitialized());
@@ -2813,13 +2815,11 @@ inline bool IsNonNull(InterpState &S, CodePtr OpPC) {
inline bool This(InterpState &S, CodePtr OpPC) {
// Cannot read 'this' in this mode.
- if (S.checkingPotentialConstantExpression()) {
+ if (S.checkingPotentialConstantExpression())
return false;
- }
-
- const Pointer &This = S.Current->getThis();
- if (!CheckThis(S, OpPC, This))
+ if (!CheckThis(S, OpPC))
return false;
+ const Pointer &This = S.Current->getThis();
// Ensure the This pointer has been cast to the correct base.
if (!This.isDummy()) {
diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp
index c411a371282ef..a3db0d7a29cfa 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.cpp
+++ b/clang/lib/AST/ByteCode/InterpFrame.cpp
@@ -58,15 +58,12 @@ InterpFrame::InterpFrame(InterpState &S, const Function *Func, CodePtr RetPC,
// If the function has a This pointer, that one is next.
// Then follow the actual arguments (but those are handled
// in getParamPointer()).
- if (Func->hasRVO())
- RVOPtr = stackRef<Pointer>(0);
-
- if (Func->hasThisPointer()) {
- if (Func->hasRVO())
- This = stackRef<Pointer>(sizeof(Pointer));
- else
- This = stackRef<Pointer>(0);
+ if (Func->hasRVO()) {
+ // RVO pointer offset is always 0.
}
+
+ if (Func->hasThisPointer())
+ ThisPointerOffset = Func->hasRVO() ? sizeof(Pointer) : 0;
}
InterpFrame::~InterpFrame() {
@@ -167,7 +164,7 @@ void InterpFrame::describe(llvm::raw_ostream &OS) const {
/*Indentation=*/0);
OS << ".";
} else if (const auto *M = dyn_cast<CXXMethodDecl>(F)) {
- print(OS, This, S.getASTContext(),
+ print(OS, getThis(), S.getASTContext(),
S.getASTContext().getLValueReferenceType(
S.getASTContext().getCanonicalTagType(M->getParent())));
OS << ".";
diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h
index 129851155bd86..3cdc164e4bdda 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.h
+++ b/clang/lib/AST/ByteCode/InterpFrame.h
@@ -104,11 +104,19 @@ class InterpFrame final : public Frame {
/// Returns a pointer to an argument - lazily creates a block.
Pointer getParamPointer(unsigned Offset);
+ bool hasThisPointer() const { return Func && Func->hasThisPointer(); }
/// Returns the 'this' pointer.
- const Pointer &getThis() const { return This; }
+ const Pointer &getThis() const {
+ assert(hasThisPointer());
+ return stackRef<Pointer>(ThisPointerOffset);
+ }
/// Returns the RVO pointer, if the Function has one.
- const Pointer &getRVOPtr() const { return RVOPtr; }
+ const Pointer &getRVOPtr() const {
+ assert(Func);
+ assert(Func->hasRVO());
+ return stackRef<Pointer>(0);
+ }
/// Checks if the frame is a root frame - return should quit the interpreter.
bool isRoot() const { return !Func; }
@@ -163,10 +171,8 @@ class InterpFrame final : public Frame {
unsigned Depth;
/// Reference to the function being executed.
const Function *Func;
- /// Current object pointer for methods.
- Pointer This;
- /// Pointer the non-primitive return value gets constructed in.
- Pointer RVOPtr;
+ /// Offset of the instance pointer. Use with stackRef<>().
+ unsigned ThisPointerOffset;
/// Return address.
CodePtr RetPC;
/// The size of all the arguments.
>From a1bb5eba36cf92aba55255e2a3cf6d87c2b9c24f Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Wed, 24 Sep 2025 10:34:53 +0200
Subject: [PATCH 05/35] [libc++] Simplify some of the <bit> functions (#160267)
---
libcxx/include/__bit/countl.h | 2 +-
libcxx/include/__bit/countr.h | 2 +-
libcxx/include/__bit/has_single_bit.h | 2 +-
libcxx/include/__bit/rotate.h | 41 +++++++-------------
libcxx/test/libcxx/numerics/bit.ops.pass.cpp | 3 --
5 files changed, 18 insertions(+), 32 deletions(-)
diff --git a/libcxx/include/__bit/countl.h b/libcxx/include/__bit/countl.h
index 075914020879a..29b01277fb0eb 100644
--- a/libcxx/include/__bit/countl.h
+++ b/libcxx/include/__bit/countl.h
@@ -37,7 +37,7 @@ template <__unsigned_integer _Tp>
template <__unsigned_integer _Tp>
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countl_one(_Tp __t) noexcept {
- return __t != numeric_limits<_Tp>::max() ? std::countl_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits;
+ return std::countl_zero(static_cast<_Tp>(~__t));
}
#endif // _LIBCPP_STD_VER >= 20
diff --git a/libcxx/include/__bit/countr.h b/libcxx/include/__bit/countr.h
index f6c98695d3d06..4de887ad4f67c 100644
--- a/libcxx/include/__bit/countr.h
+++ b/libcxx/include/__bit/countr.h
@@ -37,7 +37,7 @@ template <__unsigned_integer _Tp>
template <__unsigned_integer _Tp>
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept {
- return __t != numeric_limits<_Tp>::max() ? std::countr_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits;
+ return std::countr_zero(static_cast<_Tp>(~__t));
}
#endif // _LIBCPP_STD_VER >= 20
diff --git a/libcxx/include/__bit/has_single_bit.h b/libcxx/include/__bit/has_single_bit.h
index b43e69323e77b..d10ab7d6c1791 100644
--- a/libcxx/include/__bit/has_single_bit.h
+++ b/libcxx/include/__bit/has_single_bit.h
@@ -25,7 +25,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <__unsigned_integer _Tp>
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool has_single_bit(_Tp __t) noexcept {
- return __t != 0 && (((__t & (__t - 1)) == 0));
+ return __t != 0 && ((__t & (__t - 1)) == 0);
}
_LIBCPP_END_NAMESPACE_STD
diff --git a/libcxx/include/__bit/rotate.h b/libcxx/include/__bit/rotate.h
index c6f34bdaf6e63..fde9058887779 100644
--- a/libcxx/include/__bit/rotate.h
+++ b/libcxx/include/__bit/rotate.h
@@ -22,46 +22,35 @@ _LIBCPP_BEGIN_NAMESPACE_STD
// Writing two full functions for rotl and rotr makes it easier for the compiler
// to optimize the code. On x86 this function becomes the ROL instruction and
// the rotr function becomes the ROR instruction.
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotl(_Tp __x, int __s) _NOEXCEPT {
- static_assert(__is_unsigned_integer_v<_Tp>, "__rotl requires an unsigned integer type");
+
+#if _LIBCPP_STD_VER >= 20
+
+template <__unsigned_integer _Tp>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotl(_Tp __t, int __cnt) noexcept {
const int __n = numeric_limits<_Tp>::digits;
- int __r = __s % __n;
+ int __r = __cnt % __n;
if (__r == 0)
- return __x;
+ return __t;
if (__r > 0)
- return (__x << __r) | (__x >> (__n - __r));
+ return (__t << __r) | (__t >> (__n - __r));
- return (__x >> -__r) | (__x << (__n + __r));
+ return (__t >> -__r) | (__t << (__n + __r));
}
-template <class _Tp>
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotr(_Tp __x, int __s) _NOEXCEPT {
- static_assert(__is_unsigned_integer_v<_Tp>, "__rotr requires an unsigned integer type");
+template <__unsigned_integer _Tp>
+[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotr(_Tp __t, int __cnt) noexcept {
const int __n = numeric_limits<_Tp>::digits;
- int __r = __s % __n;
+ int __r = __cnt % __n;
if (__r == 0)
- return __x;
+ return __t;
if (__r > 0)
- return (__x >> __r) | (__x << (__n - __r));
-
- return (__x << -__r) | (__x >> (__n + __r));
-}
+ return (__t >> __r) | (__t << (__n - __r));
-#if _LIBCPP_STD_VER >= 20
-
-template <__unsigned_integer _Tp>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotl(_Tp __t, int __cnt) noexcept {
- return std::__rotl(__t, __cnt);
-}
-
-template <__unsigned_integer _Tp>
-[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp rotr(_Tp __t, int __cnt) noexcept {
- return std::__rotr(__t, __cnt);
+ return (__t << -__r) | (__t >> (__n + __r));
}
#endif // _LIBCPP_STD_VER >= 20
diff --git a/libcxx/test/libcxx/numerics/bit.ops.pass.cpp b/libcxx/test/libcxx/numerics/bit.ops.pass.cpp
index 7f502d6e01d1e..061f7030eca0b 100644
--- a/libcxx/test/libcxx/numerics/bit.ops.pass.cpp
+++ b/libcxx/test/libcxx/numerics/bit.ops.pass.cpp
@@ -11,7 +11,6 @@
#include <__bit/bit_log2.h>
#include <__bit/countl.h>
-#include <__bit/rotate.h>
#include <cassert>
#include "test_macros.h"
@@ -19,10 +18,8 @@
TEST_CONSTEXPR_CXX14 bool test() {
const unsigned v = 0x12345678;
- ASSERT_SAME_TYPE(unsigned, decltype(std::__rotr(v, 3)));
ASSERT_SAME_TYPE(int, decltype(std::__countl_zero(v)));
- assert(std::__rotr(v, 3) == 0x02468acfU);
assert(std::__countl_zero(v) == 3);
#if TEST_STD_VER > 17
>From ba7b47dca359e3440de14ae1aa5ca72bcea43d47 Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Wed, 24 Sep 2025 10:40:36 +0200
Subject: [PATCH 06/35] [libc++] Simplify __memory/shared_count.h a bit
(#160048)
This removes a few checks that aren't required anymore and moves some
code around to the places where it's actually used.
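For reference, a self-contained sketch of the two refcount primitives the
header keeps, assuming a compiler that provides the __atomic builtins (which
the simplified code now uses unconditionally whenever threads are enabled):

#include <memory>

template <class T>
inline T refcount_increment(T &t) noexcept {
  return __atomic_add_fetch(std::addressof(t), 1, __ATOMIC_RELAXED);
}

template <class T>
inline T refcount_decrement(T &t) noexcept {
  // Acquire/release so the last owner sees all prior writes before the
  // object is destroyed; see https://llvm.org/PR22803.
  return __atomic_add_fetch(std::addressof(t), -1, __ATOMIC_ACQ_REL);
}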
---
libcxx/include/__memory/shared_count.h | 39 +++++--------------
libcxx/include/__mutex/once_flag.h | 12 +++++-
libcxx/include/mutex | 5 ++-
.../test/libcxx/transitive_includes/cxx26.csv | 1 -
4 files changed, 23 insertions(+), 34 deletions(-)
diff --git a/libcxx/include/__memory/shared_count.h b/libcxx/include/__memory/shared_count.h
index dad20bcabd7ea..b40d8c9cf77d1 100644
--- a/libcxx/include/__memory/shared_count.h
+++ b/libcxx/include/__memory/shared_count.h
@@ -22,37 +22,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD
// NOTE: Relaxed and acq/rel atomics (for increment and decrement respectively)
// should be sufficient for thread safety.
// See https://llvm.org/PR22803
-#if (defined(__clang__) && __has_builtin(__atomic_add_fetch) && defined(__ATOMIC_RELAXED) && \
- defined(__ATOMIC_ACQ_REL)) || \
- defined(_LIBCPP_COMPILER_GCC)
-# define _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT 1
-#else
-# define _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT 0
-#endif
-
-template <class _ValueType>
-inline _LIBCPP_HIDE_FROM_ABI _ValueType __libcpp_relaxed_load(_ValueType const* __value) {
-#if _LIBCPP_HAS_THREADS && defined(__ATOMIC_RELAXED) && \
- (__has_builtin(__atomic_load_n) || defined(_LIBCPP_COMPILER_GCC))
- return __atomic_load_n(__value, __ATOMIC_RELAXED);
-#else
- return *__value;
-#endif
-}
-
-template <class _ValueType>
-inline _LIBCPP_HIDE_FROM_ABI _ValueType __libcpp_acquire_load(_ValueType const* __value) {
-#if _LIBCPP_HAS_THREADS && defined(__ATOMIC_ACQUIRE) && \
- (__has_builtin(__atomic_load_n) || defined(_LIBCPP_COMPILER_GCC))
- return __atomic_load_n(__value, __ATOMIC_ACQUIRE);
-#else
- return *__value;
-#endif
-}
template <class _Tp>
inline _LIBCPP_HIDE_FROM_ABI _Tp __libcpp_atomic_refcount_increment(_Tp& __t) _NOEXCEPT {
-#if _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT && _LIBCPP_HAS_THREADS
+#if _LIBCPP_HAS_THREADS
return __atomic_add_fetch(std::addressof(__t), 1, __ATOMIC_RELAXED);
#else
return __t += 1;
@@ -61,7 +34,7 @@ inline _LIBCPP_HIDE_FROM_ABI _Tp __libcpp_atomic_refcount_increment(_Tp& __t) _N
template <class _Tp>
inline _LIBCPP_HIDE_FROM_ABI _Tp __libcpp_atomic_refcount_decrement(_Tp& __t) _NOEXCEPT {
-#if _LIBCPP_HAS_BUILTIN_ATOMIC_SUPPORT && _LIBCPP_HAS_THREADS
+#if _LIBCPP_HAS_THREADS
return __atomic_add_fetch(std::addressof(__t), -1, __ATOMIC_ACQ_REL);
#else
return __t -= 1;
@@ -95,7 +68,13 @@ class _LIBCPP_EXPORTED_FROM_ABI __shared_count {
return false;
}
#endif
- _LIBCPP_HIDE_FROM_ABI long use_count() const _NOEXCEPT { return __libcpp_relaxed_load(&__shared_owners_) + 1; }
+ _LIBCPP_HIDE_FROM_ABI long use_count() const _NOEXCEPT {
+#if _LIBCPP_HAS_THREADS
+ return __atomic_load_n(&__shared_owners_, __ATOMIC_RELAXED) + 1;
+#else
+ return __shared_owners_ + 1;
+#endif
+ }
};
class _LIBCPP_EXPORTED_FROM_ABI __shared_weak_count : private __shared_count {
diff --git a/libcxx/include/__mutex/once_flag.h b/libcxx/include/__mutex/once_flag.h
index e384c15a9f9b6..808b1ea99cc0b 100644
--- a/libcxx/include/__mutex/once_flag.h
+++ b/libcxx/include/__mutex/once_flag.h
@@ -10,10 +10,9 @@
#define _LIBCPP___MUTEX_ONCE_FLAG_H
#include <__config>
-#include <__functional/invoke.h>
#include <__memory/addressof.h>
-#include <__memory/shared_count.h> // __libcpp_acquire_load
#include <__tuple/tuple_size.h>
+#include <__type_traits/invoke.h>
#include <__utility/forward.h>
#include <__utility/integer_sequence.h>
#include <__utility/move.h>
@@ -118,6 +117,15 @@ void _LIBCPP_HIDE_FROM_ABI __call_once_proxy(void* __vp) {
_LIBCPP_EXPORTED_FROM_ABI void __call_once(volatile once_flag::_State_type&, void*, void (*)(void*));
+template <class _ValueType>
+inline _LIBCPP_HIDE_FROM_ABI _ValueType __libcpp_acquire_load(_ValueType const* __value) {
+#if _LIBCPP_HAS_THREADS
+ return __atomic_load_n(__value, __ATOMIC_ACQUIRE);
+#else
+ return *__value;
+#endif
+}
+
#ifndef _LIBCPP_CXX03_LANG
template <class _Callable, class... _Args>
diff --git a/libcxx/include/mutex b/libcxx/include/mutex
index 58474e0ca2b7a..0b81f1bb1c8a6 100644
--- a/libcxx/include/mutex
+++ b/libcxx/include/mutex
@@ -500,6 +500,10 @@ _LIBCPP_END_NAMESPACE_STD
_LIBCPP_POP_MACROS
+# if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 23
+# include <typeinfo>
+# endif
+
# if !defined(_LIBCPP_REMOVE_TRANSITIVE_INCLUDES) && _LIBCPP_STD_VER <= 20
# include <atomic>
# include <concepts>
@@ -513,7 +517,6 @@ _LIBCPP_POP_MACROS
# include <stdexcept>
# include <system_error>
# include <type_traits>
-# include <typeinfo>
# endif
#endif // __cplusplus < 201103L && defined(_LIBCPP_USE_FROZEN_CXX03_HEADERS)
diff --git a/libcxx/test/libcxx/transitive_includes/cxx26.csv b/libcxx/test/libcxx/transitive_includes/cxx26.csv
index 5f906338f4b7c..81c8c41d88756 100644
--- a/libcxx/test/libcxx/transitive_includes/cxx26.csv
+++ b/libcxx/test/libcxx/transitive_includes/cxx26.csv
@@ -669,7 +669,6 @@ mutex ctime
mutex limits
mutex ratio
mutex tuple
-mutex typeinfo
mutex version
new version
numbers version
>From 24c5ef04d245fe71e7b3941457984741c4f45959 Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Wed, 24 Sep 2025 10:41:29 +0200
Subject: [PATCH 07/35] [libc++][NFC] Refactor __is_allocator to be a variable
template (#159584)
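For readers unfamiliar with the pattern, this is the general shape of a
class-trait-to-variable-template refactor in stand-alone form (illustrative
only; the real __is_allocator detection differs in detail):

#include <cstddef>
#include <type_traits>
#include <utility>

// Old style: a class-template trait, queried as is_allocator_like<T>::value.
template <class T, class = void>
struct is_allocator_like : std::false_type {};

template <class T>
struct is_allocator_like<
    T, std::void_t<typename T::value_type,
                   decltype(std::declval<T&>().allocate(std::size_t{}))>>
    : std::true_type {};

// New style: a variable template, so call sites shorten from
// is_allocator_like<T>::value to is_allocator_like_v<T>, the same move this
// patch makes for __is_allocator_v.
template <class T>
inline constexpr bool is_allocator_like_v = is_allocator_like<T>::value;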
---
libcxx/include/__flat_map/flat_map.h | 32 ++++---
libcxx/include/__flat_map/flat_multimap.h | 32 ++++---
libcxx/include/__flat_set/flat_multiset.h | 24 +++---
libcxx/include/__flat_set/flat_set.h | 24 +++---
libcxx/include/__type_traits/is_allocator.h | 13 ++-
libcxx/include/__vector/vector.h | 8 +-
libcxx/include/deque | 8 +-
libcxx/include/forward_list | 8 +-
libcxx/include/list | 8 +-
libcxx/include/map | 36 ++++----
libcxx/include/module.modulemap.in | 5 +-
libcxx/include/queue | 40 ++++-----
libcxx/include/set | 36 ++++----
libcxx/include/sstream | 8 +-
libcxx/include/stack | 8 +-
libcxx/include/string | 14 ++--
libcxx/include/unordered_map | 84 +++++++++----------
libcxx/include/unordered_set | 76 ++++++++---------
.../test/libcxx/memory/is_allocator.pass.cpp | 14 ++--
19 files changed, 232 insertions(+), 246 deletions(-)
diff --git a/libcxx/include/__flat_map/flat_map.h b/libcxx/include/__flat_map/flat_map.h
index bf193f6d3c62f..31ba9bc0b91ac 100644
--- a/libcxx/include/__flat_map/flat_map.h
+++ b/libcxx/include/__flat_map/flat_map.h
@@ -1125,8 +1125,7 @@ class flat_map {
};
template <class _KeyContainer, class _MappedContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -1139,7 +1138,7 @@ flat_map(_KeyContainer, _MappedContainer, _Compare = _Compare())
template <class _KeyContainer, class _MappedContainer, class _Allocator>
requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
- !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value)
+ !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>)
flat_map(_KeyContainer, _MappedContainer, _Allocator)
-> flat_map<typename _KeyContainer::value_type,
typename _MappedContainer::value_type,
@@ -1148,9 +1147,8 @@ flat_map(_KeyContainer, _MappedContainer, _Allocator)
_MappedContainer>;
template <class _KeyContainer, class _MappedContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> &&
- uses_allocator_v<_MappedContainer, _Allocator> &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
+ uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -1162,8 +1160,7 @@ flat_map(_KeyContainer, _MappedContainer, _Compare, _Allocator)
_MappedContainer>;
template <class _KeyContainer, class _MappedContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -1176,7 +1173,7 @@ flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Compare = _Compare()
template <class _KeyContainer, class _MappedContainer, class _Allocator>
requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
- !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value)
+ !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>)
flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Allocator)
-> flat_map<typename _KeyContainer::value_type,
typename _MappedContainer::value_type,
@@ -1185,9 +1182,8 @@ flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Allocator)
_MappedContainer>;
template <class _KeyContainer, class _MappedContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> &&
- uses_allocator_v<_MappedContainer, _Allocator> &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
+ uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -1199,19 +1195,19 @@ flat_map(sorted_unique_t, _KeyContainer, _MappedContainer, _Compare, _Allocator)
_MappedContainer>;
template <class _InputIterator, class _Compare = less<__iter_key_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_map(_InputIterator, _InputIterator, _Compare = _Compare())
-> flat_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>;
template <class _InputIterator, class _Compare = less<__iter_key_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_map(sorted_unique_t, _InputIterator, _InputIterator, _Compare = _Compare())
-> flat_map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>;
template <ranges::input_range _Range,
class _Compare = less<__range_key_type<_Range>>,
class _Allocator = allocator<byte>,
- class = __enable_if_t<!__is_allocator<_Compare>::value && __is_allocator<_Allocator>::value>>
+ class = __enable_if_t<!__is_allocator_v<_Compare> && __is_allocator_v<_Allocator>>>
flat_map(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_map<
__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1219,7 +1215,7 @@ flat_map(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator(
vector<__range_key_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_key_type<_Range>>>,
vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>;
-template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator_v<_Allocator>>>
flat_map(from_range_t, _Range&&, _Allocator) -> flat_map<
__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1228,11 +1224,11 @@ flat_map(from_range_t, _Range&&, _Allocator) -> flat_map<
vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>;
template <class _Key, class _Tp, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_map(initializer_list<pair<_Key, _Tp>>, _Compare = _Compare()) -> flat_map<_Key, _Tp, _Compare>;
template <class _Key, class _Tp, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_map(sorted_unique_t, initializer_list<pair<_Key, _Tp>>, _Compare = _Compare()) -> flat_map<_Key, _Tp, _Compare>;
template <class _Key, class _Tp, class _Compare, class _KeyContainer, class _MappedContainer, class _Allocator>
diff --git a/libcxx/include/__flat_map/flat_multimap.h b/libcxx/include/__flat_map/flat_multimap.h
index 260d93ed25785..abaacf9e3cda3 100644
--- a/libcxx/include/__flat_map/flat_multimap.h
+++ b/libcxx/include/__flat_map/flat_multimap.h
@@ -928,8 +928,7 @@ class flat_multimap {
};
template <class _KeyContainer, class _MappedContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -942,7 +941,7 @@ flat_multimap(_KeyContainer, _MappedContainer, _Compare = _Compare())
template <class _KeyContainer, class _MappedContainer, class _Allocator>
requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
- !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value)
+ !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>)
flat_multimap(_KeyContainer, _MappedContainer, _Allocator)
-> flat_multimap<typename _KeyContainer::value_type,
typename _MappedContainer::value_type,
@@ -951,9 +950,8 @@ flat_multimap(_KeyContainer, _MappedContainer, _Allocator)
_MappedContainer>;
template <class _KeyContainer, class _MappedContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> &&
- uses_allocator_v<_MappedContainer, _Allocator> &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
+ uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -965,8 +963,7 @@ flat_multimap(_KeyContainer, _MappedContainer, _Compare, _Allocator)
_MappedContainer>;
template <class _KeyContainer, class _MappedContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -979,7 +976,7 @@ flat_multimap(sorted_equivalent_t, _KeyContainer, _MappedContainer, _Compare = _
template <class _KeyContainer, class _MappedContainer, class _Allocator>
requires(uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
- !__is_allocator<_KeyContainer>::value && !__is_allocator<_MappedContainer>::value)
+ !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer>)
flat_multimap(sorted_equivalent_t, _KeyContainer, _MappedContainer, _Allocator)
-> flat_multimap<typename _KeyContainer::value_type,
typename _MappedContainer::value_type,
@@ -988,9 +985,8 @@ flat_multimap(sorted_equivalent_t, _KeyContainer, _MappedContainer, _Allocator)
_MappedContainer>;
template <class _KeyContainer, class _MappedContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
- !__is_allocator<_MappedContainer>::value && uses_allocator_v<_KeyContainer, _Allocator> &&
- uses_allocator_v<_MappedContainer, _Allocator> &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> && !__is_allocator_v<_MappedContainer> &&
+ uses_allocator_v<_KeyContainer, _Allocator> && uses_allocator_v<_MappedContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -1002,19 +998,19 @@ flat_multimap(sorted_equivalent_t, _KeyContainer, _MappedContainer, _Compare, _A
_MappedContainer>;
template <class _InputIterator, class _Compare = less<__iter_key_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_multimap(_InputIterator, _InputIterator, _Compare = _Compare())
-> flat_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>;
template <class _InputIterator, class _Compare = less<__iter_key_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_multimap(sorted_equivalent_t, _InputIterator, _InputIterator, _Compare = _Compare())
-> flat_multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare>;
template <ranges::input_range _Range,
class _Compare = less<__range_key_type<_Range>>,
class _Allocator = allocator<byte>,
- class = __enable_if_t<!__is_allocator<_Compare>::value && __is_allocator<_Allocator>::value>>
+ class = __enable_if_t<!__is_allocator_v<_Compare> && __is_allocator_v<_Allocator>>>
flat_multimap(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_multimap<
__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1022,7 +1018,7 @@ flat_multimap(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Alloc
vector<__range_key_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_key_type<_Range>>>,
vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>;
-template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator_v<_Allocator>>>
flat_multimap(from_range_t, _Range&&, _Allocator) -> flat_multimap<
__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1031,11 +1027,11 @@ flat_multimap(from_range_t, _Range&&, _Allocator) -> flat_multimap<
vector<__range_mapped_type<_Range>, __allocator_traits_rebind_t<_Allocator, __range_mapped_type<_Range>>>>;
template <class _Key, class _Tp, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_multimap(initializer_list<pair<_Key, _Tp>>, _Compare = _Compare()) -> flat_multimap<_Key, _Tp, _Compare>;
template <class _Key, class _Tp, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_multimap(sorted_equivalent_t, initializer_list<pair<_Key, _Tp>>, _Compare = _Compare())
-> flat_multimap<_Key, _Tp, _Compare>;
diff --git a/libcxx/include/__flat_set/flat_multiset.h b/libcxx/include/__flat_set/flat_multiset.h
index 44d8af05a56af..65f4161a8c34c 100644
--- a/libcxx/include/__flat_set/flat_multiset.h
+++ b/libcxx/include/__flat_set/flat_multiset.h
@@ -689,7 +689,7 @@ class flat_multiset {
};
template <class _KeyContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -697,12 +697,12 @@ flat_multiset(_KeyContainer, _Compare = _Compare())
-> flat_multiset<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _KeyContainer, class _Allocator>
- requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value)
+ requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>)
flat_multiset(_KeyContainer, _Allocator)
-> flat_multiset<typename _KeyContainer::value_type, less<typename _KeyContainer::value_type>, _KeyContainer>;
template <class _KeyContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
uses_allocator_v<_KeyContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
@@ -711,7 +711,7 @@ flat_multiset(_KeyContainer, _Compare, _Allocator)
-> flat_multiset<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _KeyContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -719,12 +719,12 @@ flat_multiset(sorted_equivalent_t, _KeyContainer, _Compare = _Compare())
-> flat_multiset<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _KeyContainer, class _Allocator>
- requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value)
+ requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>)
flat_multiset(sorted_equivalent_t, _KeyContainer, _Allocator)
-> flat_multiset<typename _KeyContainer::value_type, less<typename _KeyContainer::value_type>, _KeyContainer>;
template <class _KeyContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
uses_allocator_v<_KeyContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
@@ -733,36 +733,36 @@ flat_multiset(sorted_equivalent_t, _KeyContainer, _Compare, _Allocator)
-> flat_multiset<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _InputIterator, class _Compare = less<__iter_value_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_multiset(_InputIterator, _InputIterator, _Compare = _Compare())
-> flat_multiset<__iter_value_type<_InputIterator>, _Compare>;
template <class _InputIterator, class _Compare = less<__iter_value_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_multiset(sorted_equivalent_t, _InputIterator, _InputIterator, _Compare = _Compare())
-> flat_multiset<__iter_value_type<_InputIterator>, _Compare>;
template <ranges::input_range _Range,
class _Compare = less<ranges::range_value_t<_Range>>,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = __enable_if_t<!__is_allocator<_Compare>::value && __is_allocator<_Allocator>::value>>
+ class = __enable_if_t<!__is_allocator_v<_Compare> && __is_allocator_v<_Allocator>>>
flat_multiset(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_multiset<
ranges::range_value_t<_Range>,
_Compare,
vector<ranges::range_value_t<_Range>, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>;
-template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator_v<_Allocator>>>
flat_multiset(from_range_t, _Range&&, _Allocator) -> flat_multiset<
ranges::range_value_t<_Range>,
less<ranges::range_value_t<_Range>>,
vector<ranges::range_value_t<_Range>, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>;
template <class _Key, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_multiset(initializer_list<_Key>, _Compare = _Compare()) -> flat_multiset<_Key, _Compare>;
template <class _Key, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_multiset(sorted_equivalent_t, initializer_list<_Key>, _Compare = _Compare()) -> flat_multiset<_Key, _Compare>;
template <class _Key, class _Compare, class _KeyContainer, class _Allocator>
diff --git a/libcxx/include/__flat_set/flat_set.h b/libcxx/include/__flat_set/flat_set.h
index 95cb998459bc8..cc788bda544de 100644
--- a/libcxx/include/__flat_set/flat_set.h
+++ b/libcxx/include/__flat_set/flat_set.h
@@ -774,19 +774,19 @@ class flat_set {
};
template <class _KeyContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
flat_set(_KeyContainer, _Compare = _Compare()) -> flat_set<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _KeyContainer, class _Allocator>
- requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value)
+ requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>)
flat_set(_KeyContainer, _Allocator)
-> flat_set<typename _KeyContainer::value_type, less<typename _KeyContainer::value_type>, _KeyContainer>;
template <class _KeyContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
uses_allocator_v<_KeyContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
@@ -794,7 +794,7 @@ template <class _KeyContainer, class _Compare, class _Allocator>
flat_set(_KeyContainer, _Compare, _Allocator) -> flat_set<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _KeyContainer, class _Compare = less<typename _KeyContainer::value_type>>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
const typename _KeyContainer::value_type&>)
@@ -802,12 +802,12 @@ flat_set(sorted_unique_t, _KeyContainer, _Compare = _Compare())
-> flat_set<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _KeyContainer, class _Allocator>
- requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator<_KeyContainer>::value)
+ requires(uses_allocator_v<_KeyContainer, _Allocator> && !__is_allocator_v<_KeyContainer>)
flat_set(sorted_unique_t, _KeyContainer, _Allocator)
-> flat_set<typename _KeyContainer::value_type, less<typename _KeyContainer::value_type>, _KeyContainer>;
template <class _KeyContainer, class _Compare, class _Allocator>
- requires(!__is_allocator<_Compare>::value && !__is_allocator<_KeyContainer>::value &&
+ requires(!__is_allocator_v<_Compare> && !__is_allocator_v<_KeyContainer> &&
uses_allocator_v<_KeyContainer, _Allocator> &&
is_invocable_v<const _Compare&,
const typename _KeyContainer::value_type&,
@@ -816,36 +816,36 @@ flat_set(sorted_unique_t, _KeyContainer, _Compare, _Allocator)
-> flat_set<typename _KeyContainer::value_type, _Compare, _KeyContainer>;
template <class _InputIterator, class _Compare = less<__iter_value_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_set(_InputIterator, _InputIterator, _Compare = _Compare())
-> flat_set<__iter_value_type<_InputIterator>, _Compare>;
template <class _InputIterator, class _Compare = less<__iter_value_type<_InputIterator>>>
- requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator<_Compare>::value)
+ requires(__has_input_iterator_category<_InputIterator>::value && !__is_allocator_v<_Compare>)
flat_set(sorted_unique_t, _InputIterator, _InputIterator, _Compare = _Compare())
-> flat_set<__iter_value_type<_InputIterator>, _Compare>;
template <ranges::input_range _Range,
class _Compare = less<ranges::range_value_t<_Range>>,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = __enable_if_t<!__is_allocator<_Compare>::value && __is_allocator<_Allocator>::value>>
+ class = __enable_if_t<!__is_allocator_v<_Compare> && __is_allocator_v<_Allocator>>>
flat_set(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator()) -> flat_set<
ranges::range_value_t<_Range>,
_Compare,
vector<ranges::range_value_t<_Range>, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>;
-template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = __enable_if_t<__is_allocator_v<_Allocator>>>
flat_set(from_range_t, _Range&&, _Allocator) -> flat_set<
ranges::range_value_t<_Range>,
less<ranges::range_value_t<_Range>>,
vector<ranges::range_value_t<_Range>, __allocator_traits_rebind_t<_Allocator, ranges::range_value_t<_Range>>>>;
template <class _Key, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_set(initializer_list<_Key>, _Compare = _Compare()) -> flat_set<_Key, _Compare>;
template <class _Key, class _Compare = less<_Key>>
- requires(!__is_allocator<_Compare>::value)
+ requires(!__is_allocator_v<_Compare>)
flat_set(sorted_unique_t, initializer_list<_Key>, _Compare = _Compare()) -> flat_set<_Key, _Compare>;
template <class _Key, class _Compare, class _KeyContainer, class _Allocator>
diff --git a/libcxx/include/__type_traits/is_allocator.h b/libcxx/include/__type_traits/is_allocator.h
index 191eeb9a1f522..f37c029a2aa89 100644
--- a/libcxx/include/__type_traits/is_allocator.h
+++ b/libcxx/include/__type_traits/is_allocator.h
@@ -11,7 +11,6 @@
#include <__config>
#include <__cstddef/size_t.h>
-#include <__type_traits/integral_constant.h>
#include <__type_traits/void_t.h>
#include <__utility/declval.h>
@@ -21,13 +20,13 @@
_LIBCPP_BEGIN_NAMESPACE_STD
-template <typename _Alloc, typename = void, typename = void>
-struct __is_allocator : false_type {};
+template <class _Alloc, class = void, class = void>
+inline const bool __is_allocator_v = false;
-template <typename _Alloc>
-struct __is_allocator<_Alloc,
- __void_t<typename _Alloc::value_type>,
- __void_t<decltype(std::declval<_Alloc&>().allocate(size_t(0)))> > : true_type {};
+template <class _Alloc>
+inline const bool __is_allocator_v<_Alloc,
+ __void_t<typename _Alloc::value_type>,
+ __void_t<decltype(std::declval<_Alloc&>().allocate(size_t()))> > = true;
_LIBCPP_END_NAMESPACE_STD
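
For reviewers skimming the `__is_allocator` -> `__is_allocator_v` hunk above: the following is a minimal standalone sketch of the same detection idiom, with simplified non-reserved names (it is illustrative only, not the in-tree spelling), showing how a `void_t`-based variable-template partial specialization replaces the old class template's `::value` member and how the deduction-guide constraints consume it:

```c++
#include <cstddef>
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>

// Primary: anything that does not match the specialization is "not an allocator".
template <class Alloc, class = void, class = void>
inline constexpr bool is_allocator_v = false;

// Specialization: selected only when Alloc has a value_type and a callable
// allocate(size_t), mirroring the detection done by the patched trait.
template <class Alloc>
inline constexpr bool
    is_allocator_v<Alloc,
                   std::void_t<typename Alloc::value_type>,
                   std::void_t<decltype(std::declval<Alloc&>().allocate(std::size_t()))>> = true;

static_assert(is_allocator_v<std::allocator<int>>);
static_assert(!is_allocator_v<std::less<int>>);   // no value_type
static_assert(!is_allocator_v<std::vector<int>>); // value_type, but no allocate()

// Deduction guides in the patch constrain directly on the variable template
// instead of spelling __is_allocator<...>::value; a simplified analogue:
template <class Container, class = std::enable_if_t<!is_allocator_v<Container>>>
struct queue_like {
  using value_type = typename Container::value_type;
};

int main() {
  queue_like<std::vector<int>> q; // OK: a container, not an allocator
  (void)q;
}
```

Because the trait is now a `bool` variable template rather than a `true_type`/`false_type` derived struct, the header no longer needs `__type_traits/integral_constant.h`, which is what the include and modulemap removals below reflect.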
diff --git a/libcxx/include/__vector/vector.h b/libcxx/include/__vector/vector.h
index a69aa9145e638..707aff3e7c3d3 100644
--- a/libcxx/include/__vector/vector.h
+++ b/libcxx/include/__vector/vector.h
@@ -176,7 +176,7 @@ class vector {
__guard.__complete();
}
- template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0>
_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI
vector(size_type __n, const value_type& __x, const allocator_type& __a)
: __alloc_(__a) {
@@ -846,20 +846,20 @@ class vector {
template <class _InputIterator,
class _Alloc = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
vector(_InputIterator, _InputIterator) -> vector<__iter_value_type<_InputIterator>, _Alloc>;
template <class _InputIterator,
class _Alloc,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
vector(_InputIterator, _InputIterator, _Alloc) -> vector<__iter_value_type<_InputIterator>, _Alloc>;
#endif
#if _LIBCPP_STD_VER >= 23
template <ranges::input_range _Range,
class _Alloc = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
vector(from_range_t, _Range&&, _Alloc = _Alloc()) -> vector<ranges::range_value_t<_Range>, _Alloc>;
#endif
diff --git a/libcxx/include/deque b/libcxx/include/deque
index 98d1dbbddb7e8..cfb64b4f07332 100644
--- a/libcxx/include/deque
+++ b/libcxx/include/deque
@@ -637,7 +637,7 @@ public:
# endif
_LIBCPP_HIDE_FROM_ABI deque(size_type __n, const value_type& __v);
- template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0>
_LIBCPP_HIDE_FROM_ABI deque(size_type __n, const value_type& __v, const allocator_type& __a)
: __map_(__pointer_allocator(__a)), __start_(0), __size_(0), __alloc_(__a) {
__annotate_new(0);
@@ -1260,20 +1260,20 @@ _LIBCPP_CONSTEXPR const typename allocator_traits<_Alloc>::difference_type deque
template <class _InputIterator,
class _Alloc = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
deque(_InputIterator, _InputIterator) -> deque<__iter_value_type<_InputIterator>, _Alloc>;
template <class _InputIterator,
class _Alloc,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
deque(_InputIterator, _InputIterator, _Alloc) -> deque<__iter_value_type<_InputIterator>, _Alloc>;
# endif
# if _LIBCPP_STD_VER >= 23
template <ranges::input_range _Range,
class _Alloc = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
deque(from_range_t, _Range&&, _Alloc = _Alloc()) -> deque<ranges::range_value_t<_Range>, _Alloc>;
# endif
diff --git a/libcxx/include/forward_list b/libcxx/include/forward_list
index 6daa7fbbc03c2..0a0bfa7a7f037 100644
--- a/libcxx/include/forward_list
+++ b/libcxx/include/forward_list
@@ -680,7 +680,7 @@ public:
# endif
_LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI forward_list(size_type __n, const value_type& __v);
- template <__enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Alloc>, int> = 0>
_LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI
forward_list(size_type __n, const value_type& __v, const allocator_type& __a)
: __base(__a) {
@@ -920,20 +920,20 @@ private:
template <class _InputIterator,
class _Alloc = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
forward_list(_InputIterator, _InputIterator) -> forward_list<__iter_value_type<_InputIterator>, _Alloc>;
template <class _InputIterator,
class _Alloc,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
forward_list(_InputIterator, _InputIterator, _Alloc) -> forward_list<__iter_value_type<_InputIterator>, _Alloc>;
# endif
# if _LIBCPP_STD_VER >= 23
template <ranges::input_range _Range,
class _Alloc = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
forward_list(from_range_t, _Range&&, _Alloc = _Alloc()) -> forward_list<ranges::range_value_t<_Range>, _Alloc>;
# endif
diff --git a/libcxx/include/list b/libcxx/include/list
index 2896231203d9b..5d8067545b9c7 100644
--- a/libcxx/include/list
+++ b/libcxx/include/list
@@ -724,7 +724,7 @@ public:
_LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI explicit list(size_type __n, const allocator_type& __a);
# endif
_LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI list(size_type __n, const value_type& __x);
- template <__enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Alloc>, int> = 0>
_LIBCPP_CONSTEXPR_SINCE_CXX26 _LIBCPP_HIDE_FROM_ABI
list(size_type __n, const value_type& __x, const allocator_type& __a)
: __base(__a) {
@@ -1002,20 +1002,20 @@ private:
template <class _InputIterator,
class _Alloc = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
list(_InputIterator, _InputIterator) -> list<__iter_value_type<_InputIterator>, _Alloc>;
template <class _InputIterator,
class _Alloc,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
list(_InputIterator, _InputIterator, _Alloc) -> list<__iter_value_type<_InputIterator>, _Alloc>;
# endif
# if _LIBCPP_STD_VER >= 23
template <ranges::input_range _Range,
class _Alloc = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Alloc>::value> >
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
list(from_range_t, _Range&&, _Alloc = _Alloc()) -> list<ranges::range_value_t<_Range>, _Alloc>;
# endif
diff --git a/libcxx/include/map b/libcxx/include/map
index 5f906bb0106c1..a63dfec910aae 100644
--- a/libcxx/include/map
+++ b/libcxx/include/map
@@ -1332,8 +1332,8 @@ template <class _InputIterator,
class _Compare = less<__iter_key_type<_InputIterator>>,
class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
map(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator())
-> map<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare, _Allocator>;
@@ -1341,8 +1341,8 @@ map(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocat
template <ranges::input_range _Range,
class _Compare = less<__range_key_type<_Range>>,
class _Allocator = allocator<__range_to_alloc_type<_Range>>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
map(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator())
-> map<__range_key_type<_Range>, __range_mapped_type<_Range>, _Compare, _Allocator>;
# endif
@@ -1351,15 +1351,15 @@ template <class _Key,
class _Tp,
class _Compare = less<remove_const_t<_Key>>,
class _Allocator = allocator<pair<const _Key, _Tp>>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
map(initializer_list<pair<_Key, _Tp>>, _Compare = _Compare(), _Allocator = _Allocator())
-> map<remove_const_t<_Key>, _Tp, _Compare, _Allocator>;
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
map(_InputIterator, _InputIterator, _Allocator)
-> map<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -1367,12 +1367,12 @@ map(_InputIterator, _InputIterator, _Allocator)
_Allocator>;
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
map(from_range_t, _Range&&, _Allocator)
-> map<__range_key_type<_Range>, __range_mapped_type<_Range>, less<__range_key_type<_Range>>, _Allocator>;
# endif
-template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
map(initializer_list<pair<_Key, _Tp>>, _Allocator)
-> map<remove_const_t<_Key>, _Tp, less<remove_const_t<_Key>>, _Allocator>;
# endif
@@ -1889,8 +1889,8 @@ template <class _InputIterator,
class _Compare = less<__iter_key_type<_InputIterator>>,
class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
multimap(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator())
-> multimap<__iter_key_type<_InputIterator>, __iter_mapped_type<_InputIterator>, _Compare, _Allocator>;
@@ -1898,8 +1898,8 @@ multimap(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Al
template <ranges::input_range _Range,
class _Compare = less<__range_key_type<_Range>>,
class _Allocator = allocator<__range_to_alloc_type<_Range>>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
multimap(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator())
-> multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, _Compare, _Allocator>;
# endif
@@ -1908,15 +1908,15 @@ template <class _Key,
class _Tp,
class _Compare = less<remove_const_t<_Key>>,
class _Allocator = allocator<pair<const _Key, _Tp>>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
multimap(initializer_list<pair<_Key, _Tp>>, _Compare = _Compare(), _Allocator = _Allocator())
-> multimap<remove_const_t<_Key>, _Tp, _Compare, _Allocator>;
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
multimap(_InputIterator, _InputIterator, _Allocator)
-> multimap<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -1924,12 +1924,12 @@ multimap(_InputIterator, _InputIterator, _Allocator)
_Allocator>;
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
multimap(from_range_t, _Range&&, _Allocator)
-> multimap<__range_key_type<_Range>, __range_mapped_type<_Range>, less<__range_key_type<_Range>>, _Allocator>;
# endif
-template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
multimap(initializer_list<pair<_Key, _Tp>>, _Allocator)
-> multimap<remove_const_t<_Key>, _Tp, less<remove_const_t<_Key>>, _Allocator>;
# endif
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
index ad7046082bc5b..dc1933324ef79 100644
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -103,10 +103,7 @@ module std_core [system] {
header "__type_traits/is_aggregate.h"
export std_core.type_traits.integral_constant
}
- module is_allocator {
- header "__type_traits/is_allocator.h"
- export std_core.type_traits.integral_constant
- }
+ module is_allocator { header "__type_traits/is_allocator.h" }
module is_always_bitcastable {
header "__type_traits/is_always_bitcastable.h"
export std_core.type_traits.integral_constant
diff --git a/libcxx/include/queue b/libcxx/include/queue
index c33afc892dda8..65936250c66a1 100644
--- a/libcxx/include/queue
+++ b/libcxx/include/queue
@@ -437,12 +437,12 @@ public:
};
# if _LIBCPP_STD_VER >= 17
-template <class _Container, class = enable_if_t<!__is_allocator<_Container>::value> >
+template <class _Container, class = enable_if_t<!__is_allocator_v<_Container>>>
queue(_Container) -> queue<typename _Container::value_type, _Container>;
template <class _Container,
class _Alloc,
- class = enable_if_t<!__is_allocator<_Container>::value>,
+ class = enable_if_t<!__is_allocator_v<_Container>>,
class = enable_if_t<uses_allocator<_Container, _Alloc>::value> >
queue(_Container, _Alloc) -> queue<typename _Container::value_type, _Container>;
# endif
@@ -457,11 +457,11 @@ queue(from_range_t, _Range&&) -> queue<ranges::range_value_t<_Range>>;
template <class _InputIterator,
class _Alloc,
__enable_if_t<__has_input_iterator_category<_InputIterator>::value, int> = 0,
- __enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
+ __enable_if_t<__is_allocator_v<_Alloc>, int> = 0>
queue(_InputIterator, _InputIterator, _Alloc)
-> queue<__iter_value_type<_InputIterator>, deque<__iter_value_type<_InputIterator>, _Alloc>>;
-template <ranges::input_range _Range, class _Alloc, __enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
+template <ranges::input_range _Range, class _Alloc, __enable_if_t<__is_allocator_v<_Alloc>, int> = 0>
queue(from_range_t, _Range&&, _Alloc)
-> queue<ranges::range_value_t<_Range>, deque<ranges::range_value_t<_Range>, _Alloc>>;
# endif
@@ -700,31 +700,31 @@ public:
# if _LIBCPP_STD_VER >= 17
template <class _Compare,
class _Container,
- class = enable_if_t<!__is_allocator<_Compare>::value>,
- class = enable_if_t<!__is_allocator<_Container>::value> >
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<!__is_allocator_v<_Container>>>
priority_queue(_Compare, _Container) -> priority_queue<typename _Container::value_type, _Container, _Compare>;
template <class _InputIterator,
class _Compare = less<__iter_value_type<_InputIterator>>,
class _Container = vector<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Compare>::value>,
- class = enable_if_t<!__is_allocator<_Container>::value> >
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<!__is_allocator_v<_Container>>>
priority_queue(_InputIterator, _InputIterator, _Compare = _Compare(), _Container = _Container())
-> priority_queue<__iter_value_type<_InputIterator>, _Container, _Compare>;
template <class _Compare,
class _Container,
class _Alloc,
- class = enable_if_t<!__is_allocator<_Compare>::value>,
- class = enable_if_t<!__is_allocator<_Container>::value>,
- class = enable_if_t<uses_allocator<_Container, _Alloc>::value> >
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<!__is_allocator_v<_Container>>,
+ class = enable_if_t<uses_allocator<_Container, _Alloc>::value>>
priority_queue(_Compare, _Container, _Alloc) -> priority_queue<typename _Container::value_type, _Container, _Compare>;
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value> >
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
priority_queue(_InputIterator, _InputIterator, _Allocator)
-> priority_queue<__iter_value_type<_InputIterator>,
vector<__iter_value_type<_InputIterator>, _Allocator>,
@@ -734,8 +734,8 @@ template <class _InputIterator,
class _Compare,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Compare>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value> >
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
priority_queue(_InputIterator, _InputIterator, _Compare, _Allocator)
-> priority_queue<__iter_value_type<_InputIterator>,
vector<__iter_value_type<_InputIterator>, _Allocator>,
@@ -746,8 +746,8 @@ template <class _InputIterator,
class _Container,
class _Alloc,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Compare>::value>,
- class = enable_if_t<!__is_allocator<_Container>::value>,
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<!__is_allocator_v<_Container>>,
class = enable_if_t<uses_allocator<_Container, _Alloc>::value> >
priority_queue(_InputIterator, _InputIterator, _Compare, _Container, _Alloc)
-> priority_queue<typename _Container::value_type, _Container, _Compare>;
@@ -757,19 +757,19 @@ priority_queue(_InputIterator, _InputIterator, _Compare, _Container, _Alloc)
template <ranges::input_range _Range,
class _Compare = less<ranges::range_value_t<_Range>>,
- class = enable_if_t<!__is_allocator<_Compare>::value>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>>
priority_queue(from_range_t, _Range&&, _Compare = _Compare())
-> priority_queue<ranges::range_value_t<_Range>, vector<ranges::range_value_t<_Range>>, _Compare>;
template <ranges::input_range _Range,
class _Compare,
class _Alloc,
- class = enable_if_t<!__is_allocator<_Compare>::value>,
- class = enable_if_t<__is_allocator<_Alloc>::value>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Alloc>>>
priority_queue(from_range_t, _Range&&, _Compare, _Alloc)
-> priority_queue<ranges::range_value_t<_Range>, vector<ranges::range_value_t<_Range>, _Alloc>, _Compare>;
-template <ranges::input_range _Range, class _Alloc, class = enable_if_t<__is_allocator<_Alloc>::value>>
+template <ranges::input_range _Range, class _Alloc, class = enable_if_t<__is_allocator_v<_Alloc>>>
priority_queue(from_range_t, _Range&&, _Alloc)
-> priority_queue<ranges::range_value_t<_Range>, vector<ranges::range_value_t<_Range>, _Alloc>>;
diff --git a/libcxx/include/set b/libcxx/include/set
index 81c3de7343ee5..75529e7bac6ff 100644
--- a/libcxx/include/set
+++ b/libcxx/include/set
@@ -899,8 +899,8 @@ template <class _InputIterator,
class _Compare = less<__iter_value_type<_InputIterator>>,
class _Allocator = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>,
+ class = enable_if_t<!__is_allocator_v<_Compare>>>
set(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator())
-> set<__iter_value_type<_InputIterator>, _Compare, _Allocator>;
@@ -908,8 +908,8 @@ set(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocat
template <ranges::input_range _Range,
class _Compare = less<ranges::range_value_t<_Range>>,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>,
+ class = enable_if_t<!__is_allocator_v<_Compare>>>
set(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator())
-> set<ranges::range_value_t<_Range>, _Compare, _Allocator>;
# endif
@@ -917,24 +917,24 @@ set(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator())
template <class _Key,
class _Compare = less<_Key>,
class _Allocator = allocator<_Key>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<!__is_allocator_v<_Compare>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
set(initializer_list<_Key>, _Compare = _Compare(), _Allocator = _Allocator()) -> set<_Key, _Compare, _Allocator>;
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
set(_InputIterator, _InputIterator, _Allocator)
-> set<__iter_value_type<_InputIterator>, less<__iter_value_type<_InputIterator>>, _Allocator>;
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
set(from_range_t, _Range&&, _Allocator)
-> set<ranges::range_value_t<_Range>, less<ranges::range_value_t<_Range>>, _Allocator>;
# endif
-template <class _Key, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <class _Key, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
set(initializer_list<_Key>, _Allocator) -> set<_Key, less<_Key>, _Allocator>;
# endif
@@ -1351,8 +1351,8 @@ template <class _InputIterator,
class _Compare = less<__iter_value_type<_InputIterator>>,
class _Allocator = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>,
+ class = enable_if_t<!__is_allocator_v<_Compare>>>
multiset(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Allocator())
-> multiset<__iter_value_type<_InputIterator>, _Compare, _Allocator>;
@@ -1360,8 +1360,8 @@ multiset(_InputIterator, _InputIterator, _Compare = _Compare(), _Allocator = _Al
template <ranges::input_range _Range,
class _Compare = less<ranges::range_value_t<_Range>>,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>,
+ class = enable_if_t<!__is_allocator_v<_Compare>>>
multiset(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator())
-> multiset<ranges::range_value_t<_Range>, _Compare, _Allocator>;
# endif
@@ -1369,25 +1369,25 @@ multiset(from_range_t, _Range&&, _Compare = _Compare(), _Allocator = _Allocator(
template <class _Key,
class _Compare = less<_Key>,
class _Allocator = allocator<_Key>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>,
- class = enable_if_t<!__is_allocator<_Compare>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>,
+ class = enable_if_t<!__is_allocator_v<_Compare>>>
multiset(initializer_list<_Key>, _Compare = _Compare(), _Allocator = _Allocator())
-> multiset<_Key, _Compare, _Allocator>;
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value, void>,
- class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
multiset(_InputIterator, _InputIterator, _Allocator)
-> multiset<__iter_value_type<_InputIterator>, less<__iter_value_type<_InputIterator>>, _Allocator>;
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
multiset(from_range_t, _Range&&, _Allocator)
-> multiset<ranges::range_value_t<_Range>, less<ranges::range_value_t<_Range>>, _Allocator>;
# endif
-template <class _Key, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value, void>>
+template <class _Key, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
multiset(initializer_list<_Key>, _Allocator) -> multiset<_Key, less<_Key>, _Allocator>;
# endif
diff --git a/libcxx/include/sstream b/libcxx/include/sstream
index 682a28fd4dbb8..c42dbff9eee5f 100644
--- a/libcxx/include/sstream
+++ b/libcxx/include/sstream
@@ -484,7 +484,7 @@ public:
# if _LIBCPP_STD_VER >= 20
template <class _SAlloc>
- requires __is_allocator<_SAlloc>::value
+ requires __is_allocator_v<_SAlloc>
_LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
return basic_string<_CharT, _Traits, _SAlloc>(view(), __sa);
}
@@ -963,7 +963,7 @@ public:
# if _LIBCPP_STD_VER >= 20
template <class _SAlloc>
- requires __is_allocator<_SAlloc>::value
+ requires __is_allocator_v<_SAlloc>
_LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
return __sb_.str(__sa);
}
@@ -1101,7 +1101,7 @@ public:
# if _LIBCPP_STD_VER >= 20
template <class _SAlloc>
- requires __is_allocator<_SAlloc>::value
+ requires __is_allocator_v<_SAlloc>
_LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
return __sb_.str(__sa);
}
@@ -1241,7 +1241,7 @@ public:
# if _LIBCPP_STD_VER >= 20
template <class _SAlloc>
- requires __is_allocator<_SAlloc>::value
+ requires __is_allocator_v<_SAlloc>
_LIBCPP_HIDE_FROM_ABI basic_string<char_type, traits_type, _SAlloc> str(const _SAlloc& __sa) const {
return __sb_.str(__sa);
}
diff --git a/libcxx/include/stack b/libcxx/include/stack
index 985813fcf578a..3d7187ddb1630 100644
--- a/libcxx/include/stack
+++ b/libcxx/include/stack
@@ -294,12 +294,12 @@ public:
};
# if _LIBCPP_STD_VER >= 17
-template <class _Container, class = enable_if_t<!__is_allocator<_Container>::value> >
+template <class _Container, class = enable_if_t<!__is_allocator_v<_Container>>>
stack(_Container) -> stack<typename _Container::value_type, _Container>;
template <class _Container,
class _Alloc,
- class = enable_if_t<!__is_allocator<_Container>::value>,
+ class = enable_if_t<!__is_allocator_v<_Container>>,
class = enable_if_t<uses_allocator<_Container, _Alloc>::value> >
stack(_Container, _Alloc) -> stack<typename _Container::value_type, _Container>;
# endif
@@ -314,11 +314,11 @@ stack(from_range_t, _Range&&) -> stack<ranges::range_value_t<_Range>>;
template <class _InputIterator,
class _Alloc,
__enable_if_t<__has_input_iterator_category<_InputIterator>::value, int> = 0,
- __enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
+ __enable_if_t<__is_allocator_v<_Alloc>, int> = 0>
stack(_InputIterator, _InputIterator, _Alloc)
-> stack<__iter_value_type<_InputIterator>, deque<__iter_value_type<_InputIterator>, _Alloc>>;
-template <ranges::input_range _Range, class _Alloc, __enable_if_t<__is_allocator<_Alloc>::value, int> = 0>
+template <ranges::input_range _Range, class _Alloc, __enable_if_t<__is_allocator_v<_Alloc>, int> = 0>
stack(from_range_t, _Range&&, _Alloc)
-> stack<ranges::range_value_t<_Range>, deque<ranges::range_value_t<_Range>, _Alloc>>;
diff --git a/libcxx/include/string b/libcxx/include/string
index 081467edfe3fb..ea5afc09d0b08 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -1055,13 +1055,13 @@ public:
}
# endif // _LIBCPP_CXX03_LANG
- template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(const _CharT* _LIBCPP_DIAGNOSE_NULLPTR __s) {
_LIBCPP_ASSERT_NON_NULL(__s != nullptr, "basic_string(const char*) detected nullptr");
__init(__s, traits_type::length(__s));
}
- template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20
basic_string(const _CharT* _LIBCPP_DIAGNOSE_NULLPTR __s, const _Allocator& __a)
: __alloc_(__a) {
@@ -1110,7 +1110,7 @@ public:
}
# endif
- template <__enable_if_t<__is_allocator<_Allocator>::value, int> = 0>
+ template <__enable_if_t<__is_allocator_v<_Allocator>, int> = 0>
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 basic_string(size_type __n, _CharT __c, const _Allocator& __a)
: __alloc_(__a) {
__init(__n, __c);
@@ -2565,21 +2565,21 @@ template <class _InputIterator,
class _CharT = __iter_value_type<_InputIterator>,
class _Allocator = allocator<_CharT>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value> >
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
basic_string(_InputIterator, _InputIterator, _Allocator = _Allocator())
-> basic_string<_CharT, char_traits<_CharT>, _Allocator>;
template <class _CharT,
class _Traits,
class _Allocator = allocator<_CharT>,
- class = enable_if_t<__is_allocator<_Allocator>::value> >
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
explicit basic_string(basic_string_view<_CharT, _Traits>, const _Allocator& = _Allocator())
-> basic_string<_CharT, _Traits, _Allocator>;
template <class _CharT,
class _Traits,
class _Allocator = allocator<_CharT>,
- class = enable_if_t<__is_allocator<_Allocator>::value>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>,
class _Sz = typename allocator_traits<_Allocator>::size_type >
basic_string(basic_string_view<_CharT, _Traits>, _Sz, _Sz, const _Allocator& = _Allocator())
-> basic_string<_CharT, _Traits, _Allocator>;
@@ -2588,7 +2588,7 @@ basic_string(basic_string_view<_CharT, _Traits>, _Sz, _Sz, const _Allocator& = _
# if _LIBCPP_STD_VER >= 23
template <ranges::input_range _Range,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<__is_allocator<_Allocator>::value> >
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
basic_string(from_range_t, _Range&&, _Allocator = _Allocator())
-> basic_string<ranges::range_value_t<_Range>, char_traits<ranges::range_value_t<_Range>>, _Allocator>;
# endif
diff --git a/libcxx/include/unordered_map b/libcxx/include/unordered_map
index 43a2245c5acc0..2afc8805cb4c7 100644
--- a/libcxx/include/unordered_map
+++ b/libcxx/include/unordered_map
@@ -1297,10 +1297,10 @@ template <class _InputIterator,
class _Pred = equal_to<__iter_key_type<_InputIterator>>,
class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(_InputIterator,
_InputIterator,
typename allocator_traits<_Allocator>::size_type = 0,
@@ -1314,10 +1314,10 @@ template <ranges::input_range _Range,
class _Hash = hash<__range_key_type<_Range>>,
class _Pred = equal_to<__range_key_type<_Range>>,
class _Allocator = allocator<__range_to_alloc_type<_Range>>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(from_range_t,
_Range&&,
typename allocator_traits<_Allocator>::size_type = 0,
@@ -1332,10 +1332,10 @@ template <class _Key,
class _Hash = hash<remove_const_t<_Key>>,
class _Pred = equal_to<remove_const_t<_Key>>,
class _Allocator = allocator<pair<const _Key, _Tp>>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(initializer_list<pair<_Key, _Tp>>,
typename allocator_traits<_Allocator>::size_type = 0,
_Hash = _Hash(),
@@ -1345,7 +1345,7 @@ unordered_map(initializer_list<pair<_Key, _Tp>>,
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_map<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -1356,7 +1356,7 @@ unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocat
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(_InputIterator, _InputIterator, _Allocator)
-> unordered_map<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -1368,9 +1368,9 @@ template <class _InputIterator,
class _Hash,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_map<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -1380,7 +1380,7 @@ unordered_map(_InputIterator, _InputIterator, typename allocator_traits<_Allocat
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_map<__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1388,7 +1388,7 @@ unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::siz
equal_to<__range_key_type<_Range>>,
_Allocator>;
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(from_range_t, _Range&&, _Allocator)
-> unordered_map<__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1399,9 +1399,9 @@ unordered_map(from_range_t, _Range&&, _Allocator)
template <ranges::input_range _Range,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_map<__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -1411,11 +1411,11 @@ unordered_map(from_range_t, _Range&&, typename allocator_traits<_Allocator>::siz
# endif
-template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(initializer_list<pair<_Key, _Tp>>, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_map<remove_const_t<_Key>, _Tp, hash<remove_const_t<_Key>>, equal_to<remove_const_t<_Key>>, _Allocator>;
-template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(initializer_list<pair<_Key, _Tp>>, _Allocator)
-> unordered_map<remove_const_t<_Key>, _Tp, hash<remove_const_t<_Key>>, equal_to<remove_const_t<_Key>>, _Allocator>;
@@ -1423,9 +1423,9 @@ template <class _Key,
class _Tp,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_map(initializer_list<pair<_Key, _Tp>>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_map<remove_const_t<_Key>, _Tp, _Hash, equal_to<remove_const_t<_Key>>, _Allocator>;
# endif
@@ -1992,10 +1992,10 @@ template <class _InputIterator,
class _Pred = equal_to<__iter_key_type<_InputIterator>>,
class _Allocator = allocator<__iter_to_alloc_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(_InputIterator,
_InputIterator,
typename allocator_traits<_Allocator>::size_type = 0,
@@ -2013,10 +2013,10 @@ template <ranges::input_range _Range,
class _Hash = hash<__range_key_type<_Range>>,
class _Pred = equal_to<__range_key_type<_Range>>,
class _Allocator = allocator<__range_to_alloc_type<_Range>>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(from_range_t,
_Range&&,
typename allocator_traits<_Allocator>::size_type = 0,
@@ -2031,10 +2031,10 @@ template <class _Key,
class _Hash = hash<remove_const_t<_Key>>,
class _Pred = equal_to<remove_const_t<_Key>>,
class _Allocator = allocator<pair<const _Key, _Tp>>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(initializer_list<pair<_Key, _Tp>>,
typename allocator_traits<_Allocator>::size_type = 0,
_Hash = _Hash(),
@@ -2045,7 +2045,7 @@ unordered_multimap(initializer_list<pair<_Key, _Tp>>,
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_multimap<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -2056,7 +2056,7 @@ unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Al
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(_InputIterator, _InputIterator, _Allocator)
-> unordered_multimap<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -2068,9 +2068,9 @@ template <class _InputIterator,
class _Hash,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_multimap<__iter_key_type<_InputIterator>,
__iter_mapped_type<_InputIterator>,
@@ -2080,7 +2080,7 @@ unordered_multimap(_InputIterator, _InputIterator, typename allocator_traits<_Al
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_multimap<__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -2088,7 +2088,7 @@ unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>
equal_to<__range_key_type<_Range>>,
_Allocator>;
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(from_range_t, _Range&&, _Allocator)
-> unordered_multimap<__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -2099,9 +2099,9 @@ unordered_multimap(from_range_t, _Range&&, _Allocator)
template <ranges::input_range _Range,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_multimap<__range_key_type<_Range>,
__range_mapped_type<_Range>,
@@ -2111,7 +2111,7 @@ unordered_multimap(from_range_t, _Range&&, typename allocator_traits<_Allocator>
# endif
-template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(initializer_list<pair<_Key, _Tp>>, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_multimap<remove_const_t<_Key>,
_Tp,
@@ -2119,7 +2119,7 @@ unordered_multimap(initializer_list<pair<_Key, _Tp>>, typename allocator_traits<
equal_to<remove_const_t<_Key>>,
_Allocator>;
-template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <class _Key, class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(initializer_list<pair<_Key, _Tp>>, _Allocator)
-> unordered_multimap<remove_const_t<_Key>,
_Tp,
@@ -2131,9 +2131,9 @@ template <class _Key,
class _Tp,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multimap(
initializer_list<pair<_Key, _Tp>>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_multimap<remove_const_t<_Key>, _Tp, _Hash, equal_to<remove_const_t<_Key>>, _Allocator>;
diff --git a/libcxx/include/unordered_set b/libcxx/include/unordered_set
index c6ee0ffdec6af..6b81fc318e3a1 100644
--- a/libcxx/include/unordered_set
+++ b/libcxx/include/unordered_set
@@ -917,10 +917,10 @@ template <class _InputIterator,
class _Pred = equal_to<__iter_value_type<_InputIterator>>,
class _Allocator = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(_InputIterator,
_InputIterator,
typename allocator_traits<_Allocator>::size_type = 0,
@@ -933,10 +933,10 @@ template <ranges::input_range _Range,
class _Hash = hash<ranges::range_value_t<_Range>>,
class _Pred = equal_to<ranges::range_value_t<_Range>>,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(from_range_t,
_Range&&,
typename allocator_traits<_Allocator>::size_type = 0,
@@ -950,10 +950,10 @@ template <class _Tp,
class _Hash = hash<_Tp>,
class _Pred = equal_to<_Tp>,
class _Allocator = allocator<_Tp>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(initializer_list<_Tp>,
typename allocator_traits<_Allocator>::size_type = 0,
_Hash = _Hash(),
@@ -963,7 +963,7 @@ unordered_set(initializer_list<_Tp>,
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_set<__iter_value_type<_InputIterator>,
hash<__iter_value_type<_InputIterator>>,
@@ -974,22 +974,22 @@ template <class _InputIterator,
class _Hash,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_set<__iter_value_type<_InputIterator>, _Hash, equal_to<__iter_value_type<_InputIterator>>, _Allocator>;
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_set<ranges::range_value_t<_Range>,
hash<ranges::range_value_t<_Range>>,
equal_to<ranges::range_value_t<_Range>>,
_Allocator>;
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(from_range_t, _Range&&, _Allocator)
-> unordered_set<ranges::range_value_t<_Range>,
hash<ranges::range_value_t<_Range>>,
@@ -999,24 +999,24 @@ unordered_set(from_range_t, _Range&&, _Allocator)
template <ranges::input_range _Range,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_set<ranges::range_value_t<_Range>, _Hash, equal_to<ranges::range_value_t<_Range>>, _Allocator>;
# endif
-template <class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_set<_Tp, hash<_Tp>, equal_to<_Tp>, _Allocator>;
template <class _Tp,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_set(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_set<_Tp, _Hash, equal_to<_Tp>, _Allocator>;
# endif
@@ -1502,10 +1502,10 @@ template <class _InputIterator,
class _Pred = equal_to<__iter_value_type<_InputIterator>>,
class _Allocator = allocator<__iter_value_type<_InputIterator>>,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(
_InputIterator,
_InputIterator,
@@ -1519,10 +1519,10 @@ template <ranges::input_range _Range,
class _Hash = hash<ranges::range_value_t<_Range>>,
class _Pred = equal_to<ranges::range_value_t<_Range>>,
class _Allocator = allocator<ranges::range_value_t<_Range>>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(
from_range_t,
_Range&&,
@@ -1536,10 +1536,10 @@ template <class _Tp,
class _Hash = hash<_Tp>,
class _Pred = equal_to<_Tp>,
class _Allocator = allocator<_Tp>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<!__is_allocator<_Pred>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<!__is_allocator_v<_Pred>>,
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(initializer_list<_Tp>,
typename allocator_traits<_Allocator>::size_type = 0,
_Hash = _Hash(),
@@ -1549,7 +1549,7 @@ unordered_multiset(initializer_list<_Tp>,
template <class _InputIterator,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_multiset<__iter_value_type<_InputIterator>,
hash<__iter_value_type<_InputIterator>>,
@@ -1560,9 +1560,9 @@ template <class _InputIterator,
class _Hash,
class _Allocator,
class = enable_if_t<__has_input_iterator_category<_InputIterator>::value>,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(_InputIterator, _InputIterator, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_multiset<__iter_value_type<_InputIterator>,
_Hash,
@@ -1571,14 +1571,14 @@ unordered_multiset(_InputIterator, _InputIterator, typename allocator_traits<_Al
# if _LIBCPP_STD_VER >= 23
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_multiset<ranges::range_value_t<_Range>,
hash<ranges::range_value_t<_Range>>,
equal_to<ranges::range_value_t<_Range>>,
_Allocator>;
-template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <ranges::input_range _Range, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(from_range_t, _Range&&, _Allocator)
-> unordered_multiset<ranges::range_value_t<_Range>,
hash<ranges::range_value_t<_Range>>,
@@ -1588,24 +1588,24 @@ unordered_multiset(from_range_t, _Range&&, _Allocator)
template <ranges::input_range _Range,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(from_range_t, _Range&&, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_multiset<ranges::range_value_t<_Range>, _Hash, equal_to<ranges::range_value_t<_Range>>, _Allocator>;
# endif
-template <class _Tp, class _Allocator, class = enable_if_t<__is_allocator<_Allocator>::value>>
+template <class _Tp, class _Allocator, class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Allocator)
-> unordered_multiset<_Tp, hash<_Tp>, equal_to<_Tp>, _Allocator>;
template <class _Tp,
class _Hash,
class _Allocator,
- class = enable_if_t<!__is_allocator<_Hash>::value>,
+ class = enable_if_t<!__is_allocator_v<_Hash>>,
class = enable_if_t<!is_integral<_Hash>::value>,
- class = enable_if_t<__is_allocator<_Allocator>::value>>
+ class = enable_if_t<__is_allocator_v<_Allocator>>>
unordered_multiset(initializer_list<_Tp>, typename allocator_traits<_Allocator>::size_type, _Hash, _Allocator)
-> unordered_multiset<_Tp, _Hash, equal_to<_Tp>, _Allocator>;
# endif
diff --git a/libcxx/test/libcxx/memory/is_allocator.pass.cpp b/libcxx/test/libcxx/memory/is_allocator.pass.cpp
index cf11d077bf086..ad01b93a449e1 100644
--- a/libcxx/test/libcxx/memory/is_allocator.pass.cpp
+++ b/libcxx/test/libcxx/memory/is_allocator.pass.cpp
@@ -11,7 +11,7 @@
// UNSUPPORTED: c++03, c++11, c++14
// template<typename _Alloc>
-// struct __is_allocator;
+// inline const bool __is_allocator_v;
// Is either true_type or false_type depending on if A is an allocator.
@@ -23,15 +23,13 @@
#include "test_allocator.h"
template <typename T>
-void test_allocators()
-{
- static_assert(!std::__is_allocator<T>::value, "" );
- static_assert( std::__is_allocator<std::allocator<T>>::value, "" );
- static_assert( std::__is_allocator<test_allocator<T>>::value, "" );
- static_assert( std::__is_allocator<min_allocator<T>>::value, "" );
+void test_allocators() {
+ static_assert(!std::__is_allocator_v<T>, "");
+ static_assert(std::__is_allocator_v<std::allocator<T>>, "");
+ static_assert(std::__is_allocator_v<test_allocator<T>>, "");
+ static_assert(std::__is_allocator_v<min_allocator<T>>, "");
}
-
int main(int, char**)
{
// test_allocators<void>();
>From e80daf3942dd372f37d98e1a20e876a58f235ef8 Mon Sep 17 00:00:00 2001
From: Elizaveta Noskova <159026035+enoskova-sc at users.noreply.github.com>
Date: Wed, 24 Sep 2025 11:46:18 +0300
Subject: [PATCH 08/35] [RISCV] Don't merge pseudo selects with stack
adjustment instrs in between (#160105)
When we have a sequence of select pseudo instructions with stack adjustment
instructions in between, we shouldn't apply the optimization proposed in
https://reviews.llvm.org/D59355. If the optimization is applied, the
function won't be marked `adjustsStack` during the Finalize ISel pass.
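For illustration only (a minimal sketch, not the committed test below, and
with hypothetical function/value names), this is the shape that triggers the
problem on rv32: two select pseudos separated by an operation that lowers to
a stack-adjusting libcall, so the selects must not be merged across it.

; Hypothetical sketch of the problematic pattern.
define i32 @sketch(i1 %c, i32 %x) {
entry:
  %a = select i1 %c, i32 %x, i32 1   ; first select pseudo
  %q = udiv i32 %x, 7                ; lowers to a __udivsi3 call on rv32,
                                     ; which adjusts the stack
  %b = select i1 %c, i32 %q, i32 3   ; second select pseudo
  %r = add i32 %a, %b
  ret i32 %r
}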
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 ++-
.../select-pseudo-merge-with-stack-adj.ll | 64 +++++++++++++++++++
2 files changed, 70 insertions(+), 2 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/select-pseudo-merge-with-stack-adj.ll
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 937213bfddfad..1ae5cb5730dc4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -22274,6 +22274,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
// - They are debug instructions. Otherwise,
// - They do not have side-effects, do not access memory and their inputs do
// not depend on the results of the select pseudo-instructions.
+ // - They don't adjust stack.
// The TrueV/FalseV operands of the selects cannot depend on the result of
// previous selects in the sequence.
// These conditions could be further relaxed. See the X86 target for a
@@ -22302,6 +22303,8 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
SelectDests.insert(MI.getOperand(0).getReg());
MachineInstr *LastSelectPseudo = &MI;
+ const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
+
for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
SequenceMBBI != E; ++SequenceMBBI) {
if (SequenceMBBI->isDebugInstr())
@@ -22321,7 +22324,9 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
}
if (SequenceMBBI->hasUnmodeledSideEffects() ||
SequenceMBBI->mayLoadOrStore() ||
- SequenceMBBI->usesCustomInsertionHook())
+ SequenceMBBI->usesCustomInsertionHook() ||
+ TII.isFrameInstr(*SequenceMBBI) ||
+ SequenceMBBI->isStackAligningInlineAsm())
break;
if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
@@ -22329,7 +22334,6 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
break;
}
- const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
DebugLoc DL = MI.getDebugLoc();
MachineFunction::iterator I = ++BB->getIterator();
diff --git a/llvm/test/CodeGen/RISCV/select-pseudo-merge-with-stack-adj.ll b/llvm/test/CodeGen/RISCV/select-pseudo-merge-with-stack-adj.ll
new file mode 100644
index 0000000000000..f0941120fc806
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/select-pseudo-merge-with-stack-adj.ll
@@ -0,0 +1,64 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc %s -mtriple riscv32 -verify-machineinstrs -o - | FileCheck %s
+
+define i32 @test(i1 %arg_1, i32 %arg_2) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; CHECK-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; CHECK-NEXT: .cfi_offset ra, -4
+; CHECK-NEXT: .cfi_offset s0, -8
+; CHECK-NEXT: .cfi_offset s1, -12
+; CHECK-NEXT: andi s1, a0, 1
+; CHECK-NEXT: mv a0, a1
+; CHECK-NEXT: mv s0, a1
+; CHECK-NEXT: bnez s1, .LBB0_2
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: li s0, 1
+; CHECK-NEXT: .LBB0_2: # %entry
+; CHECK-NEXT: li a1, 7
+; CHECK-NEXT: call __udivsi3
+; CHECK-NEXT: bnez s1, .LBB0_4
+; CHECK-NEXT: # %bb.3: # %entry
+; CHECK-NEXT: li a0, 3
+; CHECK-NEXT: .LBB0_4: # %entry
+; CHECK-NEXT: bnez s1, .LBB0_6
+; CHECK-NEXT: # %bb.5: # %entry
+; CHECK-NEXT: mv s0, a0
+; CHECK-NEXT: .LBB0_6: # %entry
+; CHECK-NEXT: li a0, 3
+; CHECK-NEXT: .LBB0_7: # %body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: addi s0, s0, 4
+; CHECK-NEXT: bltu a0, s0, .LBB0_7
+; CHECK-NEXT: # %bb.8: # %exit
+; CHECK-NEXT: mv a0, s0
+; CHECK-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; CHECK-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; CHECK-NEXT: .cfi_restore ra
+; CHECK-NEXT: .cfi_restore s0
+; CHECK-NEXT: .cfi_restore s1
+; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: .cfi_def_cfa_offset 0
+; CHECK-NEXT: ret
+entry:
+ %sel_1 = select i1 %arg_1, i32 %arg_2, i32 1
+ %div = udiv i32 %arg_2, 7
+ %cond_1 = icmp ugt i32 %div, %sel_1
+ %sel_2 = select i1 %arg_1, i32 %div, i32 3
+ %sel = select i1 %arg_1, i32 %sel_1, i32 %sel_2
+ br label %body
+
+body:
+ %res = phi i32 [ %sel, %entry ], [ %add_loop, %body ]
+ %add_loop = add i32 4, %res
+ %cond_2 = icmp ugt i32 %add_loop, 3
+ br i1 %cond_2, label %body, label %exit
+
+exit:
+ ret i32 %add_loop
+}
>From 6613d8b5d6940e03c7d84cd7c91a84c9df6d999c Mon Sep 17 00:00:00 2001
From: jeanPerier <jperier at nvidia.com>
Date: Wed, 24 Sep 2025 10:52:32 +0200
Subject: [PATCH 09/35] [flang][OpenACC][NFC] remove legacy
openacc-unwrap-fir-box option (#160291)
Remove `openacc-unwrap-fir-box`.
I am working on a change that will map the acc data operation result to
the symbol so that accesses to variables that appeared in data clauses
inside a compute region are isolated from accesses to the variable outside
the region (using a different SSA value).
This change will not work properly with the `openacc-unwrap-fir-box`
option, which makes the data operation result/operand a raw pointer, even
in cases where more information about the variable is needed to work with
it later.
It would likely be technically possible to reconstruct the variable from
the raw result, but that would add extra complexity to a legacy option.
I think it is better to remove it and to work later on a pass that
"unboxes" where possible (leveraging inlining).
---
flang/lib/Lower/OpenACC.cpp | 343 +----
flang/test/Lower/OpenACC/acc-bounds.f90 | 47 +-
...acc-data-operands-unwrap-defaultbounds.f90 | 152 --
.../OpenACC/acc-data-unwrap-defaultbounds.f90 | 205 ---
.../acc-declare-unwrap-defaultbounds.f90 | 478 -------
.../acc-enter-data-unwrap-defaultbounds.f90 | 818 -----------
.../acc-exit-data-unwrap-defaultbounds.f90 | 107 --
.../acc-host-data-unwrap-defaultbounds.f90 | 54 -
.../acc-private-unwrap-defaultbounds.f90 | 403 ------
.../acc-reduction-unwrap-defaultbounds.f90 | 1227 -----------------
10 files changed, 74 insertions(+), 3760 deletions(-)
delete mode 100644 flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90
delete mode 100644 flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90
diff --git a/flang/lib/Lower/OpenACC.cpp b/flang/lib/Lower/OpenACC.cpp
index 07234663cbef6..95d0adae02670 100644
--- a/flang/lib/Lower/OpenACC.cpp
+++ b/flang/lib/Lower/OpenACC.cpp
@@ -44,12 +44,6 @@
#define DEBUG_TYPE "flang-lower-openacc"
-static llvm::cl::opt<bool> unwrapFirBox(
- "openacc-unwrap-fir-box",
- llvm::cl::desc(
- "Whether to use the address from fix.box in data clause operations."),
- llvm::cl::init(false));
-
static llvm::cl::opt<bool> generateDefaultBounds(
"openacc-generate-default-bounds",
llvm::cl::desc("Whether to generate default bounds for arrays."),
@@ -73,7 +67,6 @@ static unsigned routineCounter = 0;
static constexpr llvm::StringRef accRoutinePrefix = "acc_routine_";
static constexpr llvm::StringRef accPrivateInitName = "acc.private.init";
static constexpr llvm::StringRef accReductionInitName = "acc.reduction.init";
-static constexpr llvm::StringRef accFirDescriptorPostfix = "_desc";
static mlir::Location
genOperandLocation(Fortran::lower::AbstractConverter &converter,
@@ -120,43 +113,6 @@ createDataEntryOp(fir::FirOpBuilder &builder, mlir::Location loc,
llvm::ArrayRef<mlir::Attribute> asyncOnlyDeviceTypes,
bool unwrapBoxAddr = false, mlir::Value isPresent = {}) {
mlir::Value varPtrPtr;
- // The data clause may apply to either the box reference itself or the
- // pointer to the data it holds. So use `unwrapBoxAddr` to decide.
- // When we have a box value - assume it refers to the data inside box.
- if (unwrapFirBox &&
- ((fir::isBoxAddress(baseAddr.getType()) && unwrapBoxAddr) ||
- fir::isa_box_type(baseAddr.getType()))) {
- if (isPresent) {
- mlir::Type ifRetTy =
- mlir::cast<fir::BaseBoxType>(fir::unwrapRefType(baseAddr.getType()))
- .getEleTy();
- if (!fir::isa_ref_type(ifRetTy))
- ifRetTy = fir::ReferenceType::get(ifRetTy);
- baseAddr =
- builder
- .genIfOp(loc, {ifRetTy}, isPresent,
- /*withElseRegion=*/true)
- .genThen([&]() {
- if (fir::isBoxAddress(baseAddr.getType()))
- baseAddr = fir::LoadOp::create(builder, loc, baseAddr);
- mlir::Value boxAddr =
- fir::BoxAddrOp::create(builder, loc, baseAddr);
- fir::ResultOp::create(builder, loc, mlir::ValueRange{boxAddr});
- })
- .genElse([&] {
- mlir::Value absent =
- fir::AbsentOp::create(builder, loc, ifRetTy);
- fir::ResultOp::create(builder, loc, mlir::ValueRange{absent});
- })
- .getResults()[0];
- } else {
- if (fir::isBoxAddress(baseAddr.getType()))
- baseAddr = fir::LoadOp::create(builder, loc, baseAddr);
- baseAddr = fir::BoxAddrOp::create(builder, loc, baseAddr);
- }
- retTy = baseAddr.getType();
- }
-
llvm::SmallVector<mlir::Value, 8> operands;
llvm::SmallVector<int32_t, 8> operandSegments;
@@ -246,46 +202,14 @@ static void createDeclareAllocFuncWithArg(mlir::OpBuilder &modBuilder,
llvm::SmallVector<mlir::Value> bounds;
std::stringstream asFortranDesc;
asFortranDesc << asFortran.str();
- if (unwrapFirBox)
- asFortranDesc << accFirDescriptorPostfix.str();
-
- // For descriptor, preserve old behavior when unwrapping FIR box: update.
- if (unwrapFirBox) {
- mlir::acc::UpdateDeviceOp updateDeviceOp =
- createDataEntryOp<mlir::acc::UpdateDeviceOp>(
- builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds,
- /*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, descTy,
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- llvm::SmallVector<int32_t> operandSegments{0, 0, 0, 1};
- llvm::SmallVector<mlir::Value> operands{updateDeviceOp.getResult()};
- createSimpleOp<mlir::acc::UpdateOp>(builder, loc, operands,
- operandSegments);
- } else {
- // New behavior: start a structured region with declare_enter.
- EntryOp descEntryOp = createDataEntryOp<EntryOp>(
- builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds,
- /*structured=*/false, /*implicit=*/true, clause, descTy,
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareEnterOp::create(
- builder, loc,
- mlir::acc::DeclareTokenType::get(descEntryOp.getContext()),
- mlir::ValueRange(descEntryOp.getAccVar()));
- }
-
- if (unwrapFirBox) {
- mlir::Value desc =
- fir::LoadOp::create(builder, loc, registerFuncOp.getArgument(0));
- fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, desc);
- addDeclareAttr(builder, boxAddrOp.getOperation(), clause);
- EntryOp entryOp = createDataEntryOp<EntryOp>(
- builder, loc, boxAddrOp.getResult(), asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareEnterOp::create(
- builder, loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()),
- mlir::ValueRange(entryOp.getAccVar()));
- }
+ // Start a structured region with declare_enter.
+ EntryOp descEntryOp = createDataEntryOp<EntryOp>(
+ builder, loc, registerFuncOp.getArgument(0), asFortranDesc, bounds,
+ /*structured=*/false, /*implicit=*/true, clause, descTy,
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
+ mlir::acc::DeclareEnterOp::create(
+ builder, loc, mlir::acc::DeclareTokenType::get(descEntryOp.getContext()),
+ mlir::ValueRange(descEntryOp.getAccVar()));
modBuilder.setInsertionPointAfter(registerFuncOp);
builder.restoreInsertionPoint(crtInsPt);
@@ -307,67 +231,32 @@ static void createDeclareDeallocFuncWithArg(
modBuilder, builder, loc, preDeallocFuncName.str(), {descTy}, {loc});
mlir::Value var = preDeallocOp.getArgument(0);
- if (unwrapFirBox) {
- mlir::Value loadOp =
- fir::LoadOp::create(builder, loc, preDeallocOp.getArgument(0));
- fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, loadOp);
- addDeclareAttr(builder, boxAddrOp.getOperation(), clause);
- var = boxAddrOp.getResult();
- }
llvm::SmallVector<mlir::Value> bounds;
- if (unwrapFirBox) {
- // Unwrap: delete device payload using getdeviceptr + declare_exit + ExitOp
- mlir::acc::GetDevicePtrOp entryOp =
- createDataEntryOp<mlir::acc::GetDevicePtrOp>(
- builder, loc, var, asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, var.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
- mlir::ValueRange(entryOp.getAccVar()));
-
- if constexpr (std::is_same_v<ExitOp, mlir::acc::CopyoutOp> ||
- std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
- ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
- entryOp.getVar(), entryOp.getVarType(),
- entryOp.getBounds(), entryOp.getAsyncOperands(),
- entryOp.getAsyncOperandsDeviceTypeAttr(),
- entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
- /*structured=*/false, /*implicit=*/false,
- builder.getStringAttr(*entryOp.getName()));
- else
- ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
- entryOp.getBounds(), entryOp.getAsyncOperands(),
- entryOp.getAsyncOperandsDeviceTypeAttr(),
- entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
- /*structured=*/false, /*implicit=*/false,
- builder.getStringAttr(*entryOp.getName()));
- } else {
- mlir::acc::GetDevicePtrOp entryOp =
- createDataEntryOp<mlir::acc::GetDevicePtrOp>(
- builder, loc, var, asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, var.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
- mlir::ValueRange(entryOp.getAccVar()));
-
- if constexpr (std::is_same_v<ExitOp, mlir::acc::CopyoutOp> ||
- std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
- ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
- entryOp.getVar(), entryOp.getVarType(),
- entryOp.getBounds(), entryOp.getAsyncOperands(),
- entryOp.getAsyncOperandsDeviceTypeAttr(),
- entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
- /*structured=*/false, /*implicit=*/false,
- builder.getStringAttr(*entryOp.getName()));
- else
- ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
- entryOp.getBounds(), entryOp.getAsyncOperands(),
- entryOp.getAsyncOperandsDeviceTypeAttr(),
- entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
- /*structured=*/false, /*implicit=*/false,
- builder.getStringAttr(*entryOp.getName()));
- }
+ mlir::acc::GetDevicePtrOp entryOp =
+ createDataEntryOp<mlir::acc::GetDevicePtrOp>(
+ builder, loc, var, asFortran, bounds,
+ /*structured=*/false, /*implicit=*/false, clause, var.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
+ mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
+ mlir::ValueRange(entryOp.getAccVar()));
+
+ if constexpr (std::is_same_v<ExitOp, mlir::acc::CopyoutOp> ||
+ std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
+ ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
+ entryOp.getVar(), entryOp.getVarType(), entryOp.getBounds(),
+ entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
+ /*structured=*/false, /*implicit=*/false,
+ builder.getStringAttr(*entryOp.getName()));
+ else
+ ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
+ entryOp.getBounds(), entryOp.getAsyncOperands(),
+ entryOp.getAsyncOperandsDeviceTypeAttr(),
+ entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
+ /*structured=*/false, /*implicit=*/false,
+ builder.getStringAttr(*entryOp.getName()));
// Generate the post dealloc function.
modBuilder.setInsertionPointAfter(preDeallocOp);
@@ -378,33 +267,14 @@ static void createDeclareDeallocFuncWithArg(
modBuilder, builder, loc, postDeallocFuncName.str(), {descTy}, {loc});
var = postDeallocOp.getArgument(0);
- if (unwrapFirBox) {
- var = fir::LoadOp::create(builder, loc, postDeallocOp.getArgument(0));
- asFortran << accFirDescriptorPostfix.str();
- }
-
- if (unwrapFirBox) {
- // Old behavior: update descriptor after deallocation.
- mlir::acc::UpdateDeviceOp updateDeviceOp =
- createDataEntryOp<mlir::acc::UpdateDeviceOp>(
- builder, loc, var, asFortran, bounds,
- /*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, var.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- llvm::SmallVector<int32_t> operandSegments{0, 0, 0, 1};
- llvm::SmallVector<mlir::Value> operands{updateDeviceOp.getResult()};
- createSimpleOp<mlir::acc::UpdateOp>(builder, loc, operands,
- operandSegments);
- } else {
- // New behavior: end structured region with declare_exit.
- mlir::acc::GetDevicePtrOp postEntryOp =
- createDataEntryOp<mlir::acc::GetDevicePtrOp>(
- builder, loc, var, asFortran, bounds,
- /*structured=*/false, /*implicit=*/true, clause, var.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
- mlir::ValueRange(postEntryOp.getAccVar()));
- }
+ // End structured region with declare_exit.
+ mlir::acc::GetDevicePtrOp postEntryOp =
+ createDataEntryOp<mlir::acc::GetDevicePtrOp>(
+ builder, loc, var, asFortran, bounds,
+ /*structured=*/false, /*implicit=*/true, clause, var.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
+ mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
+ mlir::ValueRange(postEntryOp.getAccVar()));
modBuilder.setInsertionPointAfter(postDeallocOp);
builder.restoreInsertionPoint(crtInsPt);
}
@@ -780,7 +650,7 @@ genDataOperandOperations(const Fortran::parser::AccObjectList &objectList,
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds,
- /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox,
+ /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false,
/*genDefaultBounds=*/generateDefaultBounds,
/*strideIncludeLowerExtent=*/strideIncludeLowerExtent);
LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
@@ -839,7 +709,7 @@ static void genDeclareDataOperandOperations(
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds,
- /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox,
+ /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false,
/*genDefaultBounds=*/generateDefaultBounds,
/*strideIncludeLowerExtent=*/strideIncludeLowerExtent);
LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
@@ -1409,7 +1279,7 @@ static void genPrivatizationRecipes(
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds,
- /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox,
+ /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false,
/*genDefaultBounds=*/generateDefaultBounds,
/*strideIncludeLowerExtent=*/strideIncludeLowerExtent);
LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
@@ -1842,7 +1712,7 @@ genReductions(const Fortran::parser::AccObjectListWithReduction &objectList,
mlir::acc::DataBoundsOp, mlir::acc::DataBoundsType>(
converter, builder, semanticsContext, stmtCtx, symbol, designator,
operandLocation, asFortran, bounds,
- /*treatIndexAsSection=*/true, /*unwrapFirBox=*/unwrapFirBox,
+ /*treatIndexAsSection=*/true, /*unwrapFirBox=*/false,
/*genDefaultBounds=*/generateDefaultBounds,
/*strideIncludeLowerExtent=*/strideIncludeLowerExtent);
LLVM_DEBUG(llvm::dbgs() << __func__ << "\n"; info.dump(llvm::dbgs()));
@@ -4052,45 +3922,15 @@ static void createDeclareAllocFunc(mlir::OpBuilder &modBuilder,
asFortran << Fortran::lower::mangle::demangleName(globalOp.getSymName());
std::stringstream asFortranDesc;
asFortranDesc << asFortran.str();
- if (unwrapFirBox)
- asFortranDesc << accFirDescriptorPostfix.str();
llvm::SmallVector<mlir::Value> bounds;
- // For unwrapFirBox=false this remains declare_enter; for unwrapFirBox=true,
- // the descriptor post-alloc remains update behavior.
- if (unwrapFirBox) {
- mlir::acc::UpdateDeviceOp updDesc =
- createDataEntryOp<mlir::acc::UpdateDeviceOp>(
- builder, loc, addrOp, asFortranDesc, bounds,
- /*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, addrOp.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- llvm::SmallVector<int32_t> seg{0, 0, 0, 1};
- llvm::SmallVector<mlir::Value> ops{updDesc.getResult()};
- createSimpleOp<mlir::acc::UpdateOp>(builder, loc, ops, seg);
- } else {
- EntryOp descEntryOp = createDataEntryOp<EntryOp>(
- builder, loc, addrOp, asFortranDesc, bounds,
- /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareEnterOp::create(
- builder, loc,
- mlir::acc::DeclareTokenType::get(descEntryOp.getContext()),
- mlir::ValueRange(descEntryOp.getAccVar()));
- }
-
- if (unwrapFirBox) {
- auto loadOp = fir::LoadOp::create(builder, loc, addrOp.getResult());
- fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, loadOp);
- addDeclareAttr(builder, boxAddrOp.getOperation(), clause);
- EntryOp entryOp = createDataEntryOp<EntryOp>(
- builder, loc, boxAddrOp.getResult(), asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, boxAddrOp.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareEnterOp::create(
- builder, loc, mlir::acc::DeclareTokenType::get(entryOp.getContext()),
- mlir::ValueRange(entryOp.getAccVar()));
- }
+ EntryOp descEntryOp = createDataEntryOp<EntryOp>(
+ builder, loc, addrOp, asFortranDesc, bounds,
+ /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
+ mlir::acc::DeclareEnterOp::create(
+ builder, loc, mlir::acc::DeclareTokenType::get(descEntryOp.getContext()),
+ mlir::ValueRange(descEntryOp.getAccVar()));
modBuilder.setInsertionPointAfter(registerFuncOp);
}
@@ -4108,56 +3948,6 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder,
std::stringstream asFortran;
asFortran << Fortran::lower::mangle::demangleName(globalOp.getSymName());
- // If FIR box semantics are being unwrapped, then a pre-dealloc function
- // needs generated to ensure to delete the device data pointed to by the
- // descriptor before this information is lost.
- if (unwrapFirBox) {
- // Generate the pre dealloc function.
- std::stringstream preDeallocFuncName;
- preDeallocFuncName << globalOp.getSymName().str()
- << Fortran::lower::declarePreDeallocSuffix.str();
- auto preDeallocOp =
- createDeclareFunc(modBuilder, builder, loc, preDeallocFuncName.str());
-
- fir::AddrOfOp addrOp = fir::AddrOfOp::create(
- builder, loc, fir::ReferenceType::get(globalOp.getType()),
- globalOp.getSymbol());
- auto loadOp = fir::LoadOp::create(builder, loc, addrOp.getResult());
- fir::BoxAddrOp boxAddrOp = fir::BoxAddrOp::create(builder, loc, loadOp);
- mlir::Value var = boxAddrOp.getResult();
- addDeclareAttr(builder, var.getDefiningOp(), clause);
-
- llvm::SmallVector<mlir::Value> bounds;
- mlir::acc::GetDevicePtrOp entryOp =
- createDataEntryOp<mlir::acc::GetDevicePtrOp>(
- builder, loc, var, asFortran, bounds,
- /*structured=*/false, /*implicit=*/false, clause, var.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
-
- mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
- mlir::ValueRange(entryOp.getAccVar()));
-
- if constexpr (std::is_same_v<ExitOp, mlir::acc::CopyoutOp> ||
- std::is_same_v<ExitOp, mlir::acc::UpdateHostOp>)
- ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
- entryOp.getVar(), entryOp.getBounds(),
- entryOp.getAsyncOperands(),
- entryOp.getAsyncOperandsDeviceTypeAttr(),
- entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
- /*structured=*/false, /*implicit=*/false,
- builder.getStringAttr(*entryOp.getName()));
- else
- ExitOp::create(builder, entryOp.getLoc(), entryOp.getAccVar(),
- entryOp.getBounds(), entryOp.getAsyncOperands(),
- entryOp.getAsyncOperandsDeviceTypeAttr(),
- entryOp.getAsyncOnlyAttr(), entryOp.getDataClause(),
- /*structured=*/false, /*implicit=*/false,
- builder.getStringAttr(*entryOp.getName()));
-
- // Generate the post dealloc function.
- modBuilder.setInsertionPointAfter(preDeallocOp);
- }
-
std::stringstream postDeallocFuncName;
postDeallocFuncName << globalOp.getSymName().str()
<< Fortran::lower::declarePostDeallocSuffix.str();
@@ -4167,30 +3957,15 @@ static void createDeclareDeallocFunc(mlir::OpBuilder &modBuilder,
fir::AddrOfOp addrOp = fir::AddrOfOp::create(
builder, loc, fir::ReferenceType::get(globalOp.getType()),
globalOp.getSymbol());
- if (unwrapFirBox)
- asFortran << accFirDescriptorPostfix.str();
llvm::SmallVector<mlir::Value> bounds;
- if (unwrapFirBox) {
- // Unwrap mode: update the descriptor after deallocation (no declare_exit).
- mlir::acc::UpdateDeviceOp updDesc =
- createDataEntryOp<mlir::acc::UpdateDeviceOp>(
- builder, loc, addrOp, asFortran, bounds,
- /*structured=*/false, /*implicit=*/true,
- mlir::acc::DataClause::acc_update_device, addrOp.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- llvm::SmallVector<int32_t> seg{0, 0, 0, 1};
- llvm::SmallVector<mlir::Value> ops{updDesc.getResult()};
- createSimpleOp<mlir::acc::UpdateOp>(builder, loc, ops, seg);
- } else {
- // Default: end the structured declare region using declare_exit.
- mlir::acc::GetDevicePtrOp descEntryOp =
- createDataEntryOp<mlir::acc::GetDevicePtrOp>(
- builder, loc, addrOp, asFortran, bounds,
- /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(),
- /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
- mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
- mlir::ValueRange(descEntryOp.getAccVar()));
- }
+ // End the structured declare region using declare_exit.
+ mlir::acc::GetDevicePtrOp descEntryOp =
+ createDataEntryOp<mlir::acc::GetDevicePtrOp>(
+ builder, loc, addrOp, asFortran, bounds,
+ /*structured=*/false, /*implicit=*/true, clause, addrOp.getType(),
+ /*async=*/{}, /*asyncDeviceTypes=*/{}, /*asyncOnlyDeviceTypes=*/{});
+ mlir::acc::DeclareExitOp::create(builder, loc, mlir::Value{},
+ mlir::ValueRange(descEntryOp.getAccVar()));
modBuilder.setInsertionPointAfter(postDeallocOp);
}
diff --git a/flang/test/Lower/OpenACC/acc-bounds.f90 b/flang/test/Lower/OpenACC/acc-bounds.f90
index cff53a2bfd122..f6996df6d2454 100644
--- a/flang/test/Lower/OpenACC/acc-bounds.f90
+++ b/flang/test/Lower/OpenACC/acc-bounds.f90
@@ -1,6 +1,6 @@
! This test checks lowering of OpenACC data bounds operation.
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
+! RUN: bbc -fopenacc -emit-hlfir --openacc-generate-default-bounds=true %s -o - | FileCheck %s
module openacc_bounds
@@ -23,18 +23,12 @@ subroutine acc_derived_type_component_pointer_array()
end subroutine
! CHECK-LABEL: func.func @_QMopenacc_boundsPacc_derived_type_component_pointer_array() {
-! CHECK: %[[D:.*]] = fir.alloca !fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}> {bindc_name = "d", uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"}
-! CHECK: %[[DECL_D:.*]]:2 = hlfir.declare %[[D]] {uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"} : (!fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>) -> (!fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>, !fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>)
-! CHECK: %[[COORD:.*]] = hlfir.designate %[[DECL_D]]#0{"array_comp"} {fortran_attrs = #fir.var_attrs<pointer>} : (!fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
-! CHECK: %[[LOAD:.*]] = fir.load %[[COORD]] : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
-! CHECK: %[[BOX_DIMS0:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box<!fir.ptr<!fir.array<?xi32>>>, index) -> (index, index, index)
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box<!fir.ptr<!fir.array<?xi32>>>, index) -> (index, index, index)
-! CHECK: %[[UB:.*]] = arith.subi %[[BOX_DIMS1]]#1, %[[C1]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<!fir.array<?xi32>>>) -> !fir.ptr<!fir.array<?xi32>>
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xi32>> {name = "d%array_comp", structured = false}
-! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ptr<!fir.array<?xi32>>)
+! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}> {bindc_name = "d", uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"}
+! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QMopenacc_boundsFacc_derived_type_component_pointer_arrayEd"} : (!fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>) -> (!fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>, !fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>)
+! CHECK: %[[VAL_4:.*]] = hlfir.designate %[[VAL_2]]#0{"array_comp"} {fortran_attrs = #fir.var_attrs<pointer>} : (!fir.ref<!fir.type<_QMopenacc_boundsTt1{array_comp:!fir.box<!fir.ptr<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
+! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_4]] : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
+! CHECK: %[[VAL_6:.*]] = acc.create var(%[[VAL_5]] : !fir.box<!fir.ptr<!fir.array<?xi32>>>) -> !fir.box<!fir.ptr<!fir.array<?xi32>>> {name = "d%[[VAL_7:.*]]", structured = false}
+! CHECK: acc.enter_data dataOperands(%[[VAL_6]] : !fir.box<!fir.ptr<!fir.array<?xi32>>>)
! CHECK: return
! CHECK: }
@@ -73,9 +67,8 @@ subroutine acc_derived_type_component_allocatable_array()
! CHECK: %[[BOX_DIMS1:.*]]:3 = fir.box_dims %[[LOAD]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
! CHECK: %[[UB:.*]] = arith.subi %[[BOX_DIMS1]]#1, %[[C1]] : index
! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[BOX_DIMS1]]#1 : index) stride(%[[BOX_DIMS1]]#2 : index) startIdx(%[[BOX_DIMS0]]#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xi32>> {name = "d%array_comp", structured = false}
-! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xi32>>)
+! CHECK: %[[CREATE:.*]] = acc.create var(%[[LOAD]] : !fir.box<!fir.heap<!fir.array<?xi32>>>) bounds(%[[BOUND]]) -> !fir.box<!fir.heap<!fir.array<?xi32>>> {name = "d%[[VAL_15:.*]]", structured = false}
+! CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.box<!fir.heap<!fir.array<?xi32>>>)
! CHECK: return
! CHECK: }
@@ -92,9 +85,8 @@ subroutine acc_undefined_extent(a)
! CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#0, %c0{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
! CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %c1{{.*}} : index
! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%c1{{.*}} : index) {strideInBytes = true}
-! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_ARG0]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a"}
-! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.ref<!fir.array<?xf32>>)
+! CHECK: %[[PRESENT:.*]] = acc.present var(%[[DECL_ARG0]]#0 : !fir.box<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.box<!fir.array<?xf32>> {name = "a"}
+! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.box<!fir.array<?xf32>>) {
subroutine acc_multi_strides(a)
real, dimension(:,:,:) :: a
@@ -114,9 +106,8 @@ subroutine acc_multi_strides(a)
! CHECK: %[[STRIDE2:.*]] = arith.muli %[[STRIDE1]], %[[BOX_DIMS1]]#1 : index
! CHECK: %[[BOX_DIMS2:.*]]:3 = fir.box_dims %[[DECL_ARG0]]#0, %c2{{.*}} : (!fir.box<!fir.array<?x?x?xf32>>, index) -> (index, index, index)
! CHECK: %[[BOUNDS2:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[BOX_DIMS2]]#1 : index) stride(%[[STRIDE2]] : index) startIdx(%{{.*}} : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_ARG0]]#0 : (!fir.box<!fir.array<?x?x?xf32>>) -> !fir.ref<!fir.array<?x?x?xf32>>
-! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?x?x?xf32>>) bounds(%[[BOUNDS0]], %[[BOUNDS1]], %[[BOUNDS2]]) -> !fir.ref<!fir.array<?x?x?xf32>> {name = "a"}
-! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.ref<!fir.array<?x?x?xf32>>) {
+! CHECK: %[[PRESENT:.*]] = acc.present var(%[[DECL_ARG0]]#0 : !fir.box<!fir.array<?x?x?xf32>>) bounds(%[[BOUNDS0]], %[[BOUNDS1]], %[[BOUNDS2]]) -> !fir.box<!fir.array<?x?x?xf32>> {name = "a"}
+! CHECK: acc.kernels dataOperands(%[[PRESENT]] : !fir.box<!fir.array<?x?x?xf32>>) {
subroutine acc_optional_data(a)
real, pointer, optional :: a(:)
@@ -137,16 +128,8 @@ subroutine acc_optional_data(a)
! CHECK: fir.result %[[C0]], %[[CM1]], %[[C0]], %[[C0]], %[[C0]] : index, index, index, index, index
! CHECK: }
! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[RES]]#0 : index) upperbound(%[[RES]]#1 : index) extent(%[[RES]]#2 : index) stride(%[[RES]]#3 : index) startIdx(%[[RES]]#4 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.if %[[IS_PRESENT]] -> (!fir.ptr<!fir.array<?xf32>>) {
-! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! CHECK: %[[ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
-! CHECK: fir.result %[[ADDR]] : !fir.ptr<!fir.array<?xf32>>
-! CHECK: } else {
-! CHECK: %[[ABSENT:.*]] = fir.absent !fir.ptr<!fir.array<?xf32>>
-! CHECK: fir.result %[[ABSENT]] : !fir.ptr<!fir.array<?xf32>>
-! CHECK: }
-! CHECK: %[[ATTACH:.*]] = acc.attach varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xf32>> {name = "a"}
-! CHECK: acc.data dataOperands(%[[ATTACH]] : !fir.ptr<!fir.array<?xf32>>)
+! CHECK: %[[ATTACH:.*]] = acc.attach varPtr(%[[ARG0_DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>> {name = "a"}
+! CHECK: acc.data dataOperands(%[[ATTACH]] : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>) {
subroutine acc_optional_data2(a, n)
integer :: n
diff --git a/flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90
deleted file mode 100644
index 7f89fe2dd523e..0000000000000
--- a/flang/test/Lower/OpenACC/acc-data-operands-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,152 +0,0 @@
-! This test checks lowering of complex OpenACC data operands and checks
-! that default bounds are generated.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-module acc_data_operand
-
- type wrapper
- real :: data(100)
- end type
-
-contains
-
-! Testing derived-type component without section
-subroutine acc_operand_derived_type_component()
- type(wrapper) :: w
-
- !$acc data copy(w%data)
- !$acc end data
-end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_derived_type_component() {
-! CHECK: %[[W:.*]] = fir.alloca !fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}> {bindc_name = "w", uniq_name = "_QMacc_data_operandFacc_operand_derived_type_componentEw"}
-! CHECK: %[[DECLW:.*]]:2 = hlfir.declare %[[W]]
-! CHECK: %[[EXT:.*]] = arith.constant 100 : index
-! CHECK: %[[COORD_DATA:.*]] = hlfir.designate %[[DECLW]]#0{"data"} shape %{{.*}} : (!fir.ref<!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<100xf32>>
-! CHECK: %[[ONE:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.subi %[[EXT]], %[[ONE]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-! CHECK: %[[COPY_COPYIN:.*]] = acc.copyin varPtr(%[[COORD_DATA]] : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xf32>> {dataClause = #acc<data_clause acc_copy>, name = "w%data"}
-! CHECK: acc.data dataOperands(%[[COPY_COPYIN]] : !fir.ref<!fir.array<100xf32>>) {
-! CHECK: acc.terminator
-! CHECK: }
-! CHECK: acc.copyout accPtr(%[[COPY_COPYIN]] : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) to varPtr(%[[COORD_DATA]] : !fir.ref<!fir.array<100xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "w%data"}
-
-
-! Testing array of derived-type component without section
-subroutine acc_operand_array_derived_type_component()
- type(wrapper) :: w(10)
-
- !$acc data copy(w(1)%data)
- !$acc end data
-end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_derived_type_component() {
-! CHECK: %[[W:.*]] = fir.alloca !fir.array<10x!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>> {bindc_name = "w", uniq_name = "_QMacc_data_operandFacc_operand_array_derived_type_componentEw"}
-! CHECK: %[[DECLW:.*]]:2 = hlfir.declare %[[W]]
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[W_1:.*]] = hlfir.designate %[[DECLW]]#0 (%[[C1]]) : (!fir.ref<!fir.array<10x!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>>>, index) -> !fir.ref<!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>>
-! CHECK: %[[EXT:.*]] = arith.constant 100 : index
-! CHECK: %[[COORD_W1_DATA:.*]] = hlfir.designate %[[W_1]]{"data"} shape %{{.*}} : (!fir.ref<!fir.type<_QMacc_data_operandTwrapper{data:!fir.array<100xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<100xf32>>
-! CHECK: %[[ONE:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.subi %[[EXT]], %[[ONE]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-! CHECK: %[[COPY_COPYIN:.*]] = acc.copyin varPtr(%[[COORD_W1_DATA]] : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xf32>> {dataClause = #acc<data_clause acc_copy>, name = "w(1_8)%data"}
-! CHECK: acc.data dataOperands(%[[COPY_COPYIN]] : !fir.ref<!fir.array<100xf32>>) {
-! CHECK: acc.terminator
-! CHECK: }
-! CHECK: acc.copyout accPtr(%[[COPY_COPYIN]] : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) to varPtr(%[[COORD_W1_DATA]] : !fir.ref<!fir.array<100xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "w(1_8)%data"}
-
-! Testing array sections on allocatable array
-subroutine acc_operand_array_section_allocatable()
- real, allocatable :: a(:)
-
- allocate(a(100))
-
- !$acc data copyin(a(1:50)) copyout(a(51:100))
- !$acc end data
-
- deallocate(a)
-end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_section_allocatable() {
-! CHECK: %[[A:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", uniq_name = "_QMacc_data_operandFacc_operand_array_section_allocatableEa"}
-! CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]] {fortran_attrs = #fir.var_attrs<allocatable>
-! CHECK: %[[LOAD_BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[LOAD_BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_0:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.subi %[[C1]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[C50:.*]] = arith.constant 50 : index
-! CHECK: %[[UB:.*]] = arith.subi %[[C50]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[LOAD_BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[BOUND_1_50:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND_1_50]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(1:50)"}
-! CHECK: %[[LOAD_BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[LOAD_BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_0:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[C51:.*]] = arith.constant 51 : index
-! CHECK: %[[LB:.*]] = arith.subi %[[C51]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[C100:.*]] = arith.constant 100 : index
-! CHECK: %[[UB:.*]] = arith.subi %[[C100]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[LOAD_BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[BOUND_51_100:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-! CHECK: %[[COPYOUT_CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND_51_100]]) -> !fir.heap<!fir.array<?xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a(51:100)"}
-! CHECK: acc.data dataOperands(%[[COPYIN]], %[[COPYOUT_CREATE]] : !fir.heap<!fir.array<?xf32>>, !fir.heap<!fir.array<?xf32>>) {
-! CHECK: acc.terminator
-! CHECK: }
-! CHECK: acc.delete accPtr(%[[COPYIN]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND_1_50]]) {dataClause = #acc<data_clause acc_copyin>, name = "a(1:50)"}
-! CHECK: acc.copyout accPtr(%[[COPYOUT_CREATE]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND_51_100]]) to varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) {name = "a(51:100)"}
-
-
-! Testing array sections on pointer array
-subroutine acc_operand_array_section_pointer()
- real, target :: a(100)
- real, pointer :: p(:)
-
- p => a
-
- !$acc data copyin(p(1:50))
- !$acc end data
-end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_data_operandPacc_operand_array_section_pointer() {
-! CHECK: %[[P:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?xf32>>> {bindc_name = "p", uniq_name = "_QMacc_data_operandFacc_operand_array_section_pointerEp"}
-! CHECK: %[[DECLP:.*]]:2 = hlfir.declare %[[P]] {fortran_attrs = #fir.var_attrs<pointer>
-! CHECK: %[[LOAD_BOX_P_0:.*]] = fir.load %[[DECLP]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! CHECK: %[[LOAD_BOX_P_1:.*]] = fir.load %[[DECLP]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_0:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_1]], %[[C0:.*]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_0]], %[[C0]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.subi %[[C1]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[C50:.*]] = arith.constant 50 : index
-! CHECK: %[[UB:.*]] = arith.subi %[[C50]], %[[DIMS0_0]]#0 : index
-! CHECK: %[[LOAD_BOX_P_2:.*]] = fir.load %[[DECLP]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[DIMS0_2:.*]]:3 = fir.box_dims %[[LOAD_BOX_P_2]], %[[C0]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_2]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0_0]]#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD_BOX_P_0]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xf32>> {name = "p(1:50)"}
-! CHECK: acc.data dataOperands(%[[COPYIN]] : !fir.ptr<!fir.array<?xf32>>) {
-! CHECK: acc.terminator
-! CHECK: }
-! CHECK: acc.delete accPtr(%[[COPYIN]] : !fir.ptr<!fir.array<?xf32>>) bounds(%[[BOUND]]) {dataClause = #acc<data_clause acc_copyin>, name = "p(1:50)"}
-
-end module
diff --git a/flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90
deleted file mode 100644
index 789db34adefee..0000000000000
--- a/flang/test/Lower/OpenACC/acc-data-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,205 +0,0 @@
-! This test checks lowering of OpenACC data directive.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-subroutine acc_data
- real, dimension(10, 10) :: a, b, c
- real, pointer :: d, e
- logical :: ifCondition = .TRUE.
-
-! CHECK: %[[A:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"}
-! CHECK:%[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-! CHECK: %[[B:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"}
-! CHECK:%[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[C:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"}
-! CHECK:%[[DECLC:.*]]:2 = hlfir.declare %[[C]]
-! CHECK: %[[D:.*]] = fir.alloca !fir.box<!fir.ptr<f32>> {bindc_name = "d", uniq_name = "{{.*}}Ed"}
-! CHECK:%[[DECLD:.*]]:2 = hlfir.declare %[[D]]
-! CHECK: %[[E:.*]] = fir.alloca !fir.box<!fir.ptr<f32>> {bindc_name = "e", uniq_name = "{{.*}}Ee"}
-! CHECK:%[[DECLE:.*]]:2 = hlfir.declare %[[E]]
-
- !$acc data if(.TRUE.) copy(a)
- !$acc end data
-
-! CHECK: %[[IF1:.*]] = arith.constant true
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: acc.data if(%[[IF1]]) dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK:acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-
- !$acc data copy(a) if(ifCondition)
- !$acc end data
-
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: %[[IFCOND:.*]] = fir.load %{{.*}} : !fir.ref<!fir.logical<4>>
-! CHECK: %[[IF2:.*]] = fir.convert %[[IFCOND]] : (!fir.logical<4>) -> i1
-! CHECK: acc.data if(%[[IF2]]) dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK:acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-
- !$acc data copy(a, b, c)
- !$acc end data
-
-! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: %[[COPYIN_C:.*]] = acc.copyin varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "c"}
-! CHECK: acc.data dataOperands(%[[COPYIN_A]], %[[COPYIN_B]], %[[COPYIN_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK:acc.copyout accPtr(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK:acc.copyout accPtr(%[[COPYIN_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK:acc.copyout accPtr(%[[COPYIN_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "c"}
-
- !$acc data copy(a) copy(b) copy(c)
- !$acc end data
-
-! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK: %[[COPYIN_C:.*]] = acc.copyin varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "c"}
-! CHECK: acc.data dataOperands(%[[COPYIN_A]], %[[COPYIN_B]], %[[COPYIN_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK:acc.copyout accPtr(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK:acc.copyout accPtr(%[[COPYIN_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK:acc.copyout accPtr(%[[COPYIN_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "c"}
-
- !$acc data copyin(a) copyin(readonly: b, c)
- !$acc end data
-
-! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
-! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-! CHECK: %[[COPYIN_C:.*]] = acc.copyin varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copyin_readonly>, name = "c"}
-! CHECK: acc.data dataOperands(%[[COPYIN_A]], %[[COPYIN_B]], %[[COPYIN_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK: acc.delete accPtr(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_copyin>, name = "a"}
-! CHECK: acc.delete accPtr(%[[COPYIN_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-! CHECK: acc.delete accPtr(%[[COPYIN_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_copyin_readonly>, name = "c"}
-
- !$acc data copyout(a) copyout(zero: b) copyout(c)
- !$acc end data
-
-! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a"}
-! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copyout_zero>, name = "b"}
-! CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "c"}
-! CHECK: acc.data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK:acc.copyout accPtr(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a"}
-! CHECK:acc.copyout accPtr(%[[CREATE_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copyout_zero>, name = "b"}
-! CHECK:acc.copyout accPtr(%[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "c"}
-
- !$acc data create(a, b) create(zero: c)
- !$acc end data
-
-! CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
-! CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
-! CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "c"}
-! CHECK: acc.data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK: acc.delete accPtr(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_create>, name = "a"}
-! CHECK: acc.delete accPtr(%[[CREATE_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_create>, name = "b"}
-! CHECK: acc.delete accPtr(%[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_create_zero>, name = "c"}
-
- !$acc data create(c) copy(b) create(a)
- !$acc end data
-! CHECK:%[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "c"}
-! CHECK:%[[COPY_B:.*]] = acc.copyin varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "b"}
-! CHECK:%[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
-! CHECK: acc.data dataOperands(%[[CREATE_C]], %[[COPY_B]], %[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-
- !$acc data no_create(a, b) create(zero: c)
- !$acc end data
-
-! CHECK: %[[NO_CREATE_A:.*]] = acc.nocreate varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
-! CHECK: %[[NO_CREATE_B:.*]] = acc.nocreate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
-! CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "c"}
-! CHECK: acc.data dataOperands(%[[NO_CREATE_A]], %[[NO_CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK: acc.delete accPtr(%[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {dataClause = #acc<data_clause acc_create_zero>, name = "c"}
-
- !$acc data present(a, b, c)
- !$acc end data
-
-! CHECK: %[[PRESENT_A:.*]] = acc.present varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "a"}
-! CHECK: %[[PRESENT_B:.*]] = acc.present varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
-! CHECK: %[[PRESENT_C:.*]] = acc.present varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "c"}
-! CHECK: acc.data dataOperands(%[[PRESENT_A]], %[[PRESENT_B]], %[[PRESENT_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-
- !$acc data deviceptr(b, c)
- !$acc end data
-
-! CHECK: %[[DEVICEPTR_B:.*]] = acc.deviceptr varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "b"}
-! CHECK: %[[DEVICEPTR_C:.*]] = acc.deviceptr varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {name = "c"}
-! CHECK: acc.data dataOperands(%[[DEVICEPTR_B]], %[[DEVICEPTR_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-
- !$acc data attach(d, e)
- !$acc end data
-
-! CHECK: %[[ATTACH_D:.*]] = acc.attach varPtr(%{{.*}} : !fir.ptr<f32>) -> !fir.ptr<f32> {name = "d"}
-! CHECK: %[[ATTACH_E:.*]] = acc.attach varPtr(%{{.*}} : !fir.ptr<f32>) -> !fir.ptr<f32> {name = "e"}
-! CHECK: acc.data dataOperands(%[[ATTACH_D]], %[[ATTACH_E]] : !fir.ptr<f32>, !fir.ptr<f32>) {
-! CHECK: acc.terminator
-! CHECK-NEXT: }{{$}}
-! CHECK: acc.detach accPtr(%[[ATTACH_D]] : !fir.ptr<f32>) {dataClause = #acc<data_clause acc_attach>, name = "d"}
-! CHECK: acc.detach accPtr(%[[ATTACH_E]] : !fir.ptr<f32>) {dataClause = #acc<data_clause acc_attach>, name = "e"}
-
- !$acc data present(a) async
- !$acc end data
-
-! CHECK: acc.data async dataOperands(%{{.*}}) {
-! CHECK: }
-
- !$acc data copy(a) async(1)
- !$acc end data
-
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%{{.*}} : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC:.*]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: acc.data async(%[[ASYNC]] : i32) dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) {
-! CHECK: }{{$}}
-! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC]] : i32) to varPtr(%{{.*}} : !fir.ref<!fir.array<10x10xf32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-
- !$acc data present(a) wait
- !$acc end data
-
-! CHECK: acc.data dataOperands(%{{.*}}) wait {
-! CHECK: }
-
- !$acc data present(a) wait(1)
- !$acc end data
-
-! CHECK: acc.data dataOperands(%{{.*}}) wait({%{{.*}} : i32}) {
-! CHECK: }{{$}}
-
- !$acc data present(a) wait(devnum: 0: 1)
- !$acc end data
-
-! CHECK: acc.data dataOperands(%{{.*}}) wait({devnum: %{{.*}} : i32, %{{.*}} : i32}) {
-! CHECK: }{{$}}
-
- !$acc data default(none)
- !$acc end data
-
-! CHECK: acc.data {
-! CHECK: acc.terminator
-! CHECK: } attributes {defaultAttr = #acc<defaultvalue none>}
-
- !$acc data default(present)
- !$acc end data
-
-! CHECK: acc.data {
-! CHECK: acc.terminator
-! CHECK: } attributes {defaultAttr = #acc<defaultvalue present>}
-
- !$acc data
- !$acc end data
-! CHECK-NOT: acc.data
-
-end subroutine acc_data
diff --git a/flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90
deleted file mode 100644
index 4b181f8a26987..0000000000000
--- a/flang/test/Lower/OpenACC/acc-declare-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,478 +0,0 @@
-! This test checks lowering of OpenACC declare directive in function and
-! subroutine specification parts.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-module acc_declare
- contains
-
- subroutine acc_declare_copy()
- integer :: a(100), i
- !$acc declare copy(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_copy()
-! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-! CHECK-DAG: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_copyEa"}
-! CHECK-DAG: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_copy>, uniq_name = "_QMacc_declareFacc_declare_copyEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xi32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) {
-! CHECK: }
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) to varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: return
-
- subroutine acc_declare_create()
- integer :: a(100), i
- !$acc declare create(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_create() {
-! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-! CHECK-DAG: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_createEa"}
-! CHECK-DAG: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_create>, uniq_name = "_QMacc_declareFacc_declare_createEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xi32>> {name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) {
-! CHECK: }
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.delete accPtr(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) {dataClause = #acc<data_clause acc_create>, name = "a"}
-! CHECK: return
-
- subroutine acc_declare_present(a)
- integer :: a(100), i
- !$acc declare present(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_present(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"})
-! CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
-! CHECK-DAG: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare<dataClause = acc_present>, uniq_name = "_QMacc_declareFacc_declare_presentEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>, !fir.dscope) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%[[C1]] : index)
-! CHECK: %[[PRESENT:.*]] = acc.present varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xi32>> {name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[PRESENT]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32)
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[PRESENT]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.delete accPtr(%[[PRESENT]] : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) {dataClause = #acc<data_clause acc_present>, name = "a"}
-
- subroutine acc_declare_copyin()
- integer :: a(100), b(10), i
- !$acc declare copyin(a) copyin(readonly: b)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_copyin()
-! CHECK: %[[A:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_copyinEa"}
-! CHECK: %[[ADECL:.*]]:2 = hlfir.declare %[[A]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_copyin>, uniq_name = "_QMacc_declareFacc_declare_copyinEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[B:.*]] = fir.alloca !fir.array<10xi32> {bindc_name = "b", uniq_name = "_QMacc_declareFacc_declare_copyinEb"}
-! CHECK: %[[BDECL:.*]]:2 = hlfir.declare %[[B]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_copyin_readonly>, uniq_name = "_QMacc_declareFacc_declare_copyinEb"} : (!fir.ref<!fir.array<10xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>)
-! CHECK: %[[BOUND_A:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index)
-! CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[ADECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND_A]]) -> !fir.ref<!fir.array<100xi32>> {name = "a"}
-! CHECK: %[[BOUND_B:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index)
-! CHECK: %[[COPYIN_B:.*]] = acc.copyin varPtr(%[[BDECL]]#0 : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND_B]]) -> !fir.ref<!fir.array<10xi32>> {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-! CHECK: acc.declare_enter dataOperands(%[[COPYIN_A]], %[[COPYIN_B]] : !fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<10xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32)
-! CHECK: acc.delete accPtr(%[[COPYIN_A]] : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND_A]]) {dataClause = #acc<data_clause acc_copyin>, name = "a"}
-! CHECK: acc.delete accPtr(%[[COPYIN_B]] : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND_B]]) {dataClause = #acc<data_clause acc_copyin_readonly>, name = "b"}
-
- subroutine acc_declare_copyout()
- integer :: a(100), i
- !$acc declare copyout(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_copyout()
-! CHECK: %[[A:.*]] = fir.alloca !fir.array<100xi32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_copyoutEa"}
-! CHECK: %[[ADECL:.*]]:2 = hlfir.declare %[[A]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_copyout>, uniq_name = "_QMacc_declareFacc_declare_copyoutEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {dataClause = #acc<data_clause acc_copyout>, name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32)
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) to varPtr(%[[ADECL]]#0 : !fir.ref<!fir.array<100xi32>>) {name = "a"}
-! CHECK: return
-
- subroutine acc_declare_deviceptr(a)
- integer :: a(100), i
- !$acc declare deviceptr(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_deviceptr(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}) {
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare<dataClause = acc_deviceptr>, uniq_name = "_QMacc_declareFacc_declare_deviceptrEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>, !fir.dscope) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[DEVICEPTR:.*]] = acc.deviceptr varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {name = "a"}
-! CHECK: acc.declare_enter dataOperands(%[[DEVICEPTR]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32)
-
- subroutine acc_declare_link(a)
- integer :: a(100), i
- !$acc declare link(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_link(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"})
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare<dataClause = acc_declare_link>, uniq_name = "_QMacc_declareFacc_declare_linkEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>, !fir.dscope) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[LINK:.*]] = acc.declare_link varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {name = "a"}
-! CHECK: acc.declare_enter dataOperands(%[[LINK]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32)
-
- subroutine acc_declare_device_resident(a)
- integer :: a(100), i
- !$acc declare device_resident(a)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_device_resident(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"})
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare<dataClause = acc_declare_device_resident>, uniq_name = "_QMacc_declareFacc_declare_device_residentEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>, !fir.dscope) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[DEVICERES:.*]] = acc.declare_device_resident varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICERES]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%arg{{.*}} = %{{.*}}) -> (index, i32)
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICERES]] : !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.delete accPtr(%[[DEVICERES]] : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) {dataClause = #acc<data_clause acc_declare_device_resident>, name = "a"}
-
- subroutine acc_declare_device_resident2()
- integer, parameter :: n = 100
- real, dimension(n) :: dataparam
- !$acc declare device_resident(dataparam)
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_device_resident2()
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = "dataparam", uniq_name = "_QMacc_declareFacc_declare_device_resident2Edataparam"}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_declare_device_resident>, uniq_name = "_QMacc_declareFacc_declare_device_resident2Edataparam"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: %[[DEVICERES:.*]] = acc.declare_device_resident varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xf32>> {name = "dataparam"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICERES]] : !fir.ref<!fir.array<100xf32>>)
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICERES]] : !fir.ref<!fir.array<100xf32>>)
-! CHECK: acc.delete accPtr(%[[DEVICERES]] : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) {dataClause = #acc<data_clause acc_declare_device_resident>, name = "dataparam"}
-
- subroutine acc_declare_link2()
- integer, parameter :: n = 100
- real, dimension(n) :: dataparam
- !$acc declare link(dataparam)
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_link2()
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = "dataparam", uniq_name = "_QMacc_declareFacc_declare_link2Edataparam"}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_declare_link>, uniq_name = "_QMacc_declareFacc_declare_link2Edataparam"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: %[[LINK:.*]] = acc.declare_link varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xf32>> {name = "dataparam"}
-! CHECK: acc.declare_enter dataOperands(%[[LINK]] : !fir.ref<!fir.array<100xf32>>)
-
- subroutine acc_declare_deviceptr2()
- integer, parameter :: n = 100
- real, dimension(n) :: dataparam
- !$acc declare deviceptr(dataparam)
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_deviceptr2()
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32> {bindc_name = "dataparam", uniq_name = "_QMacc_declareFacc_declare_deviceptr2Edataparam"}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_deviceptr>, uniq_name = "_QMacc_declareFacc_declare_deviceptr2Edataparam"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: %[[DEVICEPTR:.*]] = acc.deviceptr varPtr(%[[DECL]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xf32>> {name = "dataparam"}
-! CHECK: acc.declare_enter dataOperands(%[[DEVICEPTR]] : !fir.ref<!fir.array<100xf32>>)
-
- function acc_declare_in_func()
- real :: a(1024)
- !$acc declare device_resident(a)
- end function
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_in_func() -> f32 {
-! CHECK: %[[DEVICE_RESIDENT:.*]] = acc.declare_device_resident varPtr(%{{.*}}#0 : !fir.ref<!fir.array<1024xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<1024xf32>> {name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[DEVICE_RESIDENT]] : !fir.ref<!fir.array<1024xf32>>)
-! CHECK: %[[LOAD:.*]] = fir.load %{{.*}}#0 : !fir.ref<f32>
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[DEVICE_RESIDENT]] : !fir.ref<!fir.array<1024xf32>>)
-! CHECK: acc.delete accPtr(%[[DEVICE_RESIDENT]] : !fir.ref<!fir.array<1024xf32>>) bounds(%{{.*}}) {dataClause = #acc<data_clause acc_declare_device_resident>, name = "a"}
-! CHECK: return %[[LOAD]] : f32
-! CHECK: }
-
- function acc_declare_in_func2(i)
- real :: a(1024)
- integer :: i
- !$acc declare create(a)
- return
- end function
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_in_func2(%arg0: !fir.ref<i32> {fir.bindc_name = "i"}) -> f32 {
-! CHECK: %[[ALLOCA_A:.*]] = fir.alloca !fir.array<1024xf32> {bindc_name = "a", uniq_name = "_QMacc_declareFacc_declare_in_func2Ea"}
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ALLOCA_A]](%{{.*}}) {acc.declare = #acc.declare<dataClause = acc_create>, uniq_name = "_QMacc_declareFacc_declare_in_func2Ea"} : (!fir.ref<!fir.array<1024xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<1024xf32>>, !fir.ref<!fir.array<1024xf32>>)
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL_A]]#0 : !fir.ref<!fir.array<1024xf32>>) bounds(%{{[0-9]+}}) -> !fir.ref<!fir.array<1024xf32>> {name = "a"}
-! CHECK: %[[TOKEN:.*]] = acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref<!fir.array<1024xf32>>)
-! CHECK: cf.br ^bb1
-! CHECK: ^bb1:
-! CHECK: acc.declare_exit token(%[[TOKEN]]) dataOperands(%[[CREATE]] : !fir.ref<!fir.array<1024xf32>>)
-! CHECK: acc.delete accPtr(%[[CREATE]] : !fir.ref<!fir.array<1024xf32>>) bounds(%{{[0-9]+}}) {dataClause = #acc<data_clause acc_create>, name = "a"}
-! CHECK: return %{{.*}} : f32
-! CHECK: }
-
- subroutine acc_declare_allocate()
- integer, allocatable :: a(:)
- !$acc declare create(a)
-
- allocate(a(100))
-
-! CHECK: %{{.*}} = fir.allocmem !fir.array<?xi32>, %{{.*}} {fir.must_be_heap = true, uniq_name = "_QMacc_declareFacc_declare_allocateEa.alloc"}
-! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_alloc>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-
- deallocate(a)
-
-! CHECK: %{{.*}} = fir.box_addr %{{.*}} {acc.declare_action = #acc.declare_action<preDealloc = @_QMacc_declareFacc_declare_allocateEa_acc_declare_pre_dealloc>} : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-
-! CHECK: fir.freemem %{{.*}} : !fir.heap<!fir.array<?xi32>>
-! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action<postDealloc = @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_dealloc>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-
-! CHECK: fir.if
-! CHECK: fir.freemem %{{.*}} : !fir.heap<!fir.array<?xi32>>
-! CHECK: fir.store %{{.*}} to %{{.*}}#0 {acc.declare_action = #acc.declare_action<postDealloc = @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_dealloc>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: }
-
- end subroutine
-
-! CHECK-LABEL: func.func private @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_alloc(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) {
-! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[ARG0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {implicit = true, name = "a_desc", structured = false}
-! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare<dataClause = acc_create>} : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) -> !fir.heap<!fir.array<?xi32>> {name = "a", structured = false}
-! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xi32>>)
-! CHECK: return
-! CHECK: }
-
-! CHECK-LABEL: func.func private @_QMacc_declareFacc_declare_allocateEa_acc_declare_pre_dealloc(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) {
-! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare<dataClause = acc_create>} : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[GETDEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) -> !fir.heap<!fir.array<?xi32>> {dataClause = #acc<data_clause acc_create>, name = "a", structured = false}
-! CHECK: acc.declare_exit dataOperands(%[[GETDEVICEPTR]] : !fir.heap<!fir.array<?xi32>>)
-! CHECK: acc.delete accPtr(%[[GETDEVICEPTR]] : !fir.heap<!fir.array<?xi32>>) {dataClause = #acc<data_clause acc_create>, name = "a", structured = false}
-! CHECK: return
-! CHECK: }
-
-! CHECK-LABEL: func.func private @_QMacc_declareFacc_declare_allocateEa_acc_declare_post_dealloc(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) {
-! CHECK: %[[LOAD:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) -> !fir.heap<!fir.array<?xi32>> {implicit = true, name = "a_desc", structured = false}
-! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.heap<!fir.array<?xi32>>)
-! CHECK: return
-! CHECK: }
-
- subroutine acc_declare_multiple_directive(a, b)
- integer :: a(100), b(100), i
- !$acc declare copy(a)
- !$acc declare copyout(b)
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_multiple_directive(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "b"}) {
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare<dataClause = acc_copy>, uniq_name = "_QMacc_declareFacc_declare_multiple_directiveEa"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>, !fir.dscope) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[DECL_B:.*]]:2 = hlfir.declare %[[ARG1]](%{{.*}}) dummy_scope %{{[0-9]+}} {acc.declare = #acc.declare<dataClause = acc_copyout>, uniq_name = "_QMacc_declareFacc_declare_multiple_directiveEb"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>, !fir.dscope) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[DECL_A]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {dataClause = #acc<data_clause acc_copy>, name = "a"}
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECL_B]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {dataClause = #acc<data_clause acc_copyout>, name = "b"}
-! CHECK: acc.declare_enter dataOperands(%[[COPYIN]], %[[CREATE]] : !fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: %{{.*}}:{{.*}} = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%{{.*}} = %{{.*}}) -> (index, i32) {
-
-
-! CHECK: acc.copyout accPtr(%[[CREATE]] : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) to varPtr(%[[DECL_B]]#0 : !fir.ref<!fir.array<100xi32>>) {name = "b"}
-! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) to varPtr(%[[DECL_A]]#0 : !fir.ref<!fir.array<100xi32>>) {dataClause = #acc<data_clause acc_copy>, name = "a"}
-
- subroutine acc_declare_array_section(a)
- integer :: a(:)
- !$acc declare copy(a(1:10))
-
- do i = 1, 100
- a(i) = i
- end do
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_array_section(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}) {
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {uniq_name = "_QMacc_declareFacc_declare_array_sectionEa"} : (!fir.box<!fir.array<?xi32>>, !fir.dscope) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 {acc.declare = #acc.declare<dataClause = acc_copy>} : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<?xi32>> {dataClause = #acc<data_clause acc_copy>, name = "a(1:10)"}
-! CHECK: acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref<!fir.array<?xi32>>)
-
-! CHECK: acc.copyout accPtr(%[[COPYIN]] : !fir.ref<!fir.array<?xi32>>) bounds(%{{.*}}) to varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xi32>>) {dataClause = #acc<data_clause acc_copy>, name = "a(1:10)"}
-
- subroutine acc_declare_allocate_with_stat()
- integer :: status
- real, pointer, dimension(:) :: localptr
- !$acc declare create(localptr)
- allocate(localptr(n), stat=status)
-
- deallocate(localptr, stat=status)
- end subroutine
-
-! CHECK-LABEL: func.func @_QMacc_declarePacc_declare_allocate_with_stat()
-! CHECK: fir.call @_FortranAPointerAllocate(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declareFacc_declare_allocate_with_statElocalptr_acc_declare_post_alloc>}
-! CHECK: fir.call @_FortranAPointerDeallocate(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}}) {{.*}} {acc.declare_action = #acc.declare_action<preDealloc = @_QMacc_declareFacc_declare_allocate_with_statElocalptr_acc_declare_pre_dealloc, postDealloc = @_QMacc_declareFacc_declare_allocate_with_statElocalptr_acc_declare_post_dealloc>}
-end module
-
-module acc_declare_allocatable_test
- integer, allocatable :: data1(:)
- !$acc declare create(data1)
-end module
-
-! CHECK-LABEL: acc.global_ctor @_QMacc_declare_allocatable_testEdata1_acc_ctor {
-! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) {acc.declare = #acc.declare<dataClause = acc_create>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[COPYIN:.*]] = acc.copyin varPtr(%[[GLOBAL_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {dataClause = #acc<data_clause acc_create>, implicit = true, name = "data1", structured = false}
-! CHECK: acc.declare_enter dataOperands(%[[COPYIN]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: acc.terminator
-! CHECK: }
-
-! CHECK-LABEL: func.func private @_QMacc_declare_allocatable_testEdata1_acc_declare_post_alloc() {
-! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[GLOBAL_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {implicit = true, name = "data1_desc", structured = false}
-! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: %[[LOAD:.*]] = fir.load %[[GLOBAL_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[BOXADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare<dataClause = acc_create>} : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOXADDR]] : !fir.heap<!fir.array<?xi32>>) -> !fir.heap<!fir.array<?xi32>> {name = "data1", structured = false}
-! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xi32>>)
-! CHECK: return
-! CHECK: }
-
-! CHECK-LABEL: func.func private @_QMacc_declare_allocatable_testEdata1_acc_declare_pre_dealloc() {
-! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[LOAD:.*]] = fir.load %[[GLOBAL_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[BOXADDR:.*]] = fir.box_addr %[[LOAD]] {acc.declare = #acc.declare<dataClause = acc_create>} : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[BOXADDR]] : !fir.heap<!fir.array<?xi32>>) -> !fir.heap<!fir.array<?xi32>> {dataClause = #acc<data_clause acc_create>, name = "data1", structured = false}
-! CHECK: acc.declare_exit dataOperands(%[[DEVPTR]] : !fir.heap<!fir.array<?xi32>>)
-! CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.heap<!fir.array<?xi32>>) {dataClause = #acc<data_clause acc_create>, name = "data1", structured = false}
-! CHECK: return
-! CHECK: }
-
-! CHECK-LABEL: func.func private @_QMacc_declare_allocatable_testEdata1_acc_declare_post_dealloc() {
-! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[UPDATE:.*]] = acc.update_device varPtr(%[[GLOBAL_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {implicit = true, name = "data1_desc", structured = false}
-! CHECK: acc.update dataOperands(%[[UPDATE]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: return
-! CHECK: }
-
-! CHECK-LABEL: acc.global_dtor @_QMacc_declare_allocatable_testEdata1_acc_dtor {
-! CHECK: %[[GLOBAL_ADDR:.*]] = fir.address_of(@_QMacc_declare_allocatable_testEdata1) {acc.declare = #acc.declare<dataClause = acc_create>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[DEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[GLOBAL_ADDR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {dataClause = #acc<data_clause acc_create>, name = "data1", structured = false}
-! CHECK: acc.declare_exit dataOperands(%[[DEVICEPTR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: acc.delete accPtr(%[[DEVICEPTR]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>) {dataClause = #acc<data_clause acc_create>, name = "data1", structured = false}
-! CHECK: acc.terminator
-! CHECK: }
-
-
-module acc_declare_equivalent
- integer, parameter :: n = 10
- real :: v1(n)
- real :: v2(n)
- equivalence(v1(1), v2(1))
- !$acc declare create(v2)
-end module
-
-! CHECK-LABEL: acc.global_ctor @_QMacc_declare_equivalentEv2_acc_ctor {
-! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalentEv1) {acc.declare = #acc.declare<dataClause = acc_create>} : !fir.ref<!fir.array<40xi8>>
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<40xi8>>) -> !fir.ref<!fir.array<40xi8>> {name = "v2", structured = false}
-! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref<!fir.array<40xi8>>)
-! CHECK: acc.terminator
-! CHECK: }
-! CHECK-LABEL: acc.global_dtor @_QMacc_declare_equivalentEv2_acc_dtor {
-! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalentEv1) {acc.declare = #acc.declare<dataClause = acc_create>} : !fir.ref<!fir.array<40xi8>>
-! CHECK: %[[DEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[ADDR]] : !fir.ref<!fir.array<40xi8>>) -> !fir.ref<!fir.array<40xi8>> {dataClause = #acc<data_clause acc_create>, name = "v2", structured = false}
-! CHECK: acc.declare_exit dataOperands(%[[DEVICEPTR]] : !fir.ref<!fir.array<40xi8>>)
-! CHECK: acc.delete accPtr(%[[DEVICEPTR]] : !fir.ref<!fir.array<40xi8>>) {dataClause = #acc<data_clause acc_create>, name = "v2", structured = false}
-! CHECK: acc.terminator
-! CHECK: }
-
-module acc_declare_equivalent2
- real :: v1(10)
- real :: v2(5)
- equivalence(v1(6), v2(1))
- !$acc declare create(v2)
-end module
-
-! CHECK-LABEL: acc.global_ctor @_QMacc_declare_equivalent2Ev2_acc_ctor {
-! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalent2Ev1) {acc.declare = #acc.declare<dataClause = acc_create>} : !fir.ref<!fir.array<40xi8>>
-! CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<40xi8>>) -> !fir.ref<!fir.array<40xi8>> {name = "v2", structured = false}
-! CHECK: acc.declare_enter dataOperands(%[[CREATE]] : !fir.ref<!fir.array<40xi8>>)
-! CHECK: acc.terminator
-! CHECK: }
-! CHECK-LABEL: acc.global_dtor @_QMacc_declare_equivalent2Ev2_acc_dtor {
-! CHECK: %[[ADDR:.*]] = fir.address_of(@_QMacc_declare_equivalent2Ev1) {acc.declare = #acc.declare<dataClause = acc_create>} : !fir.ref<!fir.array<40xi8>>
-! CHECK: %[[DEVICEPTR:.*]] = acc.getdeviceptr varPtr(%[[ADDR]] : !fir.ref<!fir.array<40xi8>>) -> !fir.ref<!fir.array<40xi8>> {dataClause = #acc<data_clause acc_create>, name = "v2", structured = false}
-! CHECK: acc.declare_exit dataOperands(%[[DEVICEPTR]] : !fir.ref<!fir.array<40xi8>>)
-! CHECK: acc.delete accPtr(%[[DEVICEPTR]] : !fir.ref<!fir.array<40xi8>>) {dataClause = #acc<data_clause acc_create>, name = "v2", structured = false}
-! CHECK: acc.terminator
-! CHECK: }
-
-! Test that the pre/post alloc/dealloc attributes are set when the
-! allocate/deallocate statements are in a different module.
-module acc_declare_allocatable_test2
-contains
- subroutine init()
- use acc_declare_allocatable_test
- allocate(data1(100))
-! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_allocatable_testEdata1_acc_declare_post_alloc>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
- end subroutine
-
- subroutine finalize()
- use acc_declare_allocatable_test
- deallocate(data1)
-! CHECK: %{{.*}} = fir.box_addr %{{.*}} {acc.declare_action = #acc.declare_action<preDealloc = @_QMacc_declare_allocatable_testEdata1_acc_declare_pre_dealloc>} : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: fir.store %{{.*}} to %{{.*}} {acc.declare_action = #acc.declare_action<postDealloc = @_QMacc_declare_allocatable_testEdata1_acc_declare_post_dealloc>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
- end subroutine
-end module
-
-module acc_declare_allocatable_test3
- integer, allocatable :: data1(:)
- integer, allocatable :: data2(:)
- !$acc declare create(data1, data2, data1)
-end module
-
-! CHECK-LABEL: acc.global_ctor @_QMacc_declare_allocatable_test3Edata1_acc_ctor
-! CHECK-LABEL: acc.global_ctor @_QMacc_declare_allocatable_test3Edata2_acc_ctor
-
-module acc_declare_post_action_stat
- real, dimension(:), allocatable :: x, y
- !$acc declare create(x,y)
-
-contains
-
- subroutine init()
- integer :: stat
- allocate(x(10), y(10), stat=stat)
- end subroutine
-end module
-
-! CHECK-LABEL: func.func @_QMacc_declare_post_action_statPinit()
-! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEx_acc_declare_post_alloc>} : (!fir.ref<!fir.box<none>>, !fir.ref<i64>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
-! CHECK: fir.if
-! CHECK: fir.call @_FortranAAllocatableAllocate({{.*}}) fastmath<contract> {acc.declare_action = #acc.declare_action<postAlloc = @_QMacc_declare_post_action_statEy_acc_declare_post_alloc>} : (!fir.ref<!fir.box<none>>, !fir.ref<i64>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
diff --git a/flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90
deleted file mode 100644
index 3e08068bdec44..0000000000000
--- a/flang/test/Lower/OpenACC/acc-enter-data-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,818 +0,0 @@
-! This test checks lowering of OpenACC enter data directive.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-subroutine acc_enter_data
- integer :: async = 1
- real, dimension(10, 10) :: a, b, c
- real, pointer :: d
- logical :: ifCondition = .TRUE.
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[EXTENT_C10:.*]] = arith.constant 10 : index
-!CHECK: %[[A:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"}
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[B:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"}
-!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-!CHECK: %[[C:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"}
-!CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]]
-!CHECK: %[[D:.*]] = fir.alloca !fir.box<!fir.ptr<f32>> {bindc_name = "d", uniq_name = "{{.*}}Ed"}
-!CHECK: %[[DECLD:.*]]:2 = hlfir.declare %[[D]]
-
- !$acc enter data create(a)
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>){{$}}
-
- !$acc enter data create(a) if(.true.)
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: [[IF1:%.*]] = arith.constant true
-!CHECK: acc.enter_data if([[IF1]]) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>){{$}}
-
- !$acc enter data create(a) if(ifCondition)
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: [[IFCOND:%.*]] = fir.load %{{.*}} : !fir.ref<!fir.logical<4>>
-!CHECK: [[IF2:%.*]] = fir.convert [[IFCOND]] : (!fir.logical<4>) -> i1
-!CHECK: acc.enter_data if([[IF2]]) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>){{$}}
-
- !$acc enter data create(a) create(b) create(c)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "c", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>){{$}}
-
- !$acc enter data create(a) create(b) create(zero: c)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_C:.*]] = acc.create varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_create_zero>, name = "c", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE_A]], %[[CREATE_B]], %[[CREATE_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>){{$}}
-
- !$acc enter data copyin(a) create(b) attach(d)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10_{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_B:.*]] = acc.create varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "b", structured = false}
-!CHECK: %[[BOX_D:.*]] = fir.load %[[DECLD]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
-!CHECK: %[[BOX_ADDR_D:.*]] = fir.box_addr %[[BOX_D]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
-!CHECK: %[[ATTACH_D:.*]] = acc.attach varPtr(%[[BOX_ADDR_D]] : !fir.ptr<f32>) -> !fir.ptr<f32> {name = "d", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]], %[[CREATE_B]], %[[ATTACH_D]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ptr<f32>){{$}}
-
- !$acc enter data create(a) async
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) async -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data async dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) wait
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data wait dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) async wait
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) async -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data async wait dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) async(1)
-!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) async(%[[ASYNC1]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data async(%[[ASYNC1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) async(async)
-!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref<i32>
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) async(%[[ASYNC2]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data async(%[[ASYNC2]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) wait(1)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[WAIT1:.*]] = arith.constant 1 : i32
-!CHECK: acc.enter_data wait(%[[WAIT1]] : i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) wait(queues: 1, 2)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[WAIT2:.*]] = arith.constant 1 : i32
-!CHECK: %[[WAIT3:.*]] = arith.constant 2 : i32
-!CHECK: acc.enter_data wait(%[[WAIT2]], %[[WAIT3]] : i32, i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data create(a) wait(devnum: 1: queues: 1, 2)
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE_A:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a", structured = false}
-!CHECK: %[[WAIT4:.*]] = arith.constant 1 : i32
-!CHECK: %[[WAIT5:.*]] = arith.constant 2 : i32
-!CHECK: %[[WAIT6:.*]] = arith.constant 1 : i32
-!CHECK: acc.enter_data wait_devnum(%[[WAIT6]] : i32) wait(%[[WAIT4]], %[[WAIT5]] : i32, i32) dataOperands(%[[CREATE_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data copyin(a(1:10,1:5))
-!CHECK: %[[BOUND0:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND0]], %[[BOUND1]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(1:10,1:5)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data copyin(a(1:,1:5))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[LB1:.*]] = arith.constant 0 : index
-!CHECK: %[[UB1:.*]] = arith.subi %c10{{.*}}, %[[ONE]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
-!CHECK: %[[LB2:.*]] = arith.constant 0 : index
-!CHECK: %[[UB2:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB2]] : index) upperbound(%[[UB2]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%c1{{.*}} : index)
-!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(1:,1:5)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data copyin(a(:10,1:5))
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[UB1:.*]] = arith.constant 9 : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB1]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB2:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB2]] : index) extent(%[[EXTENT_C10]] : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(:10,1:5)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[COPYIN_A]] : !fir.ref<!fir.array<10x10xf32>>)
-
- !$acc enter data copyin(a(:,:))
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[UB:.*]] = arith.subi %c10{{.*}}, %[[ONE]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[UB:.*]] = arith.subi %c10{{.*}}, %[[ONE]] : index
-!CHECK: %[[BOUND2:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[COPYIN_A:.*]] = acc.copyin varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%[[BOUND1]], %[[BOUND2]]) -> !fir.ref<!fir.array<10x10xf32>> {name = "a(:,:)", structured = false}
-end subroutine acc_enter_data
-
-subroutine acc_enter_data_dummy(a, b, n, m)
- integer :: n, m
- real :: a(1:10)
- real :: b(n:m)
-
-!CHECK-LABEL: func.func @_QPacc_enter_data_dummy
-!CHECK-SAME: %[[A:.*]]: !fir.ref<!fir.array<10xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<!fir.array<?xf32>> {fir.bindc_name = "b"}, %[[N:.*]]: !fir.ref<i32> {fir.bindc_name = "n"}, %[[M:.*]]: !fir.ref<i32> {fir.bindc_name = "m"}
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[DECLN:.*]]:2 = hlfir.declare %[[N]]
-!CHECK: %[[DECLM:.*]]:2 = hlfir.declare %[[M]]
-!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref<i32>
-!CHECK: %[[N_I64:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64
-!CHECK: %[[N_IDX:.*]] = fir.convert %[[N_I64]] : (i64) -> index
-!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
-!CHECK: %[[M_I64:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64
-!CHECK: %[[M_IDX:.*]] = fir.convert %[[M_I64]] : (i64) -> index
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[M_N:.*]] = arith.subi %[[M_IDX]], %[[N_IDX]] : index
-!CHECK: %[[M_N_1:.*]] = arith.addi %[[M_N]], %[[C1]] : index
-!CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[M_N_1]], %[[C0]] : index
-!CHECK: %[[EXT_B:.*]] = arith.select %[[CMP]], %[[M_N_1]], %[[C0]] : index
-!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-
- !$acc enter data create(a)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%c10{{.*}} : index) stride(%c1{{.*}} : index) startIdx(%{{.*}} : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
- !$acc enter data create(b)
-!CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS]]#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true}
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "b", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(5:10))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[LB1:.*]] = arith.constant 4 : index
-!CHECK: %[[UB1:.*]] = arith.constant 9 : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB1]] : index) upperbound(%[[UB1]] : index) extent(%c10{{.*}} : index) stride(%[[ONE]] : index) startIdx(%c1{{.*}} : index)
-!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND1]]) -> !fir.ref<!fir.array<10xf32>> {name = "a(5:10)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref<!fir.array<10xf32>>)
-
- !$acc enter data create(b(n:m))
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref<i32>
-!CHECK: %[[N_CONV1:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64
-!CHECK: %[[N_CONV2:.*]] = fir.convert %[[N_CONV1]] : (i64) -> index
-!CHECK: %[[LB:.*]] = arith.subi %[[N_CONV2]], %[[N_IDX]] : index
-!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
-!CHECK: %[[M_CONV1:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64
-!CHECK: %[[M_CONV2:.*]] = fir.convert %[[M_CONV1]] : (i64) -> index
-!CHECK: %[[UB:.*]] = arith.subi %[[M_CONV2]], %[[N_IDX]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[N_IDX]] : index) {strideInBytes = true}
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND1]]) -> !fir.ref<!fir.array<?xf32>> {name = "b(n:m)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(b(n:))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0_8 : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVERT1_N:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64
-!CHECK: %[[CONVERT2_N:.*]] = fir.convert %[[CONVERT1_N]] : (i64) -> index
-!CHECK: %[[LB:.*]] = arith.subi %[[CONVERT2_N]], %[[N_IDX]] : index
-!CHECK: %[[UB:.*]] = arith.subi %[[EXT_B]], %c1{{.*}} : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[N_IDX]] : index) {strideInBytes = true}
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND1]]) -> !fir.ref<!fir.array<?xf32>> {name = "b(n:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(b(:))
-!CHECK: %[[ZERO:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[EXT_B]], %[[ONE]] : index
-!CHECK: %[[BOUND1:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[EXT_B]] : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[N_IDX]] : index) {strideInBytes = true}
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE1:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND1]]) -> !fir.ref<!fir.array<?xf32>> {name = "b(:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE1]] : !fir.ref<!fir.array<?xf32>>)
-
-end subroutine
-
-! Test lowering of array section for non default lower bound.
-subroutine acc_enter_data_non_default_lb()
- integer :: a(0:9)
- integer :: b(11:20)
-
-!CHECK-LABEL: func.func @_QPacc_enter_data_non_default_lb() {
-!CHECK: %[[BASELB:.*]] = arith.constant 0 : index
-!CHECK: %[[EXTENT_C10:.*]] = arith.constant 10 : index
-!CHECK: %[[A:.*]] = fir.alloca !fir.array<10xi32> {bindc_name = "a", uniq_name = "_QFacc_enter_data_non_default_lbEa"}
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[B:.*]] = fir.alloca !fir.array<10xi32> {bindc_name = "b", uniq_name = "_QFacc_enter_data_non_default_lbEb"}
-!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-
- !$acc enter data create(a(5:9))
-!CHECK: %[[SECTIONLB:.*]] = arith.constant 5 : index
-!CHECK: %[[LB:.*]] = arith.subi %[[SECTIONLB]], %[[BASELB]] : index
-!CHECK: %[[SECTIONUB:.*]] = arith.constant 9 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[SECTIONUB]], %[[BASELB]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index)
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<10xi32>>) -> !fir.ref<!fir.array<10xi32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "a(5:9)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
-
- !$acc enter data create(a(:))
-!CHECK: %[[ZERO:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index)
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<10xi32>>) -> !fir.ref<!fir.array<10xi32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "a(:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
-
- !$acc enter data create(a(:6))
-!CHECK: %[[ZERO:.*]] = arith.constant 0 : index
-!CHECK: %[[SECTIONUB:.*]] = arith.constant 6 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[SECTIONUB]], %[[BASELB]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%c10{{.*}} : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index)
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<10xi32>>) -> !fir.ref<!fir.array<10xi32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "a(:6)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
-
- !$acc enter data create(a(4:))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[SECTIONLB:.*]] = arith.constant 4 : index
-!CHECK: %[[LB:.*]] = arith.subi %[[SECTIONLB]], %[[BASELB]] : index
-!CHECK: %[[UB:.*]] = arith.subi %[[EXTENT_C10]], %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[EXTENT_C10]] : index) stride(%{{.*}} : index) startIdx(%[[BASELB]] : index)
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<10xi32>>) -> !fir.ref<!fir.array<10xi32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "a(4:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
-
- !$acc enter data create(b)
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %c0{{.*}} : (!fir.box<!fir.array<10xi32>>, index) -> (index, index, index)
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%{{.*}} : index) startIdx(%c11{{.*}} : index) {strideInBytes = true}
-!CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<10xi32>>) -> !fir.ref<!fir.array<10xi32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ADDR]] : !fir.ref<!fir.array<10xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xi32>> {name = "b", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xi32>>)
-
-end subroutine
-
-! Test lowering of assumed size arrays.
-subroutine acc_enter_data_assumed(a, b, n, m)
- integer :: n, m
- real :: a(:)
- real :: b(10:)
-
-!CHECK-LABEL: func.func @_QPacc_enter_data_assumed(
-!CHECK-SAME: %[[A:.*]]: !fir.box<!fir.array<?xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.box<!fir.array<?xf32>> {fir.bindc_name = "b"}, %[[N:.*]]: !fir.ref<i32> {fir.bindc_name = "n"}, %[[M:.*]]: !fir.ref<i32> {fir.bindc_name = "m"}) {
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[LB_C10:.*]] = arith.constant 10 : i64
-!CHECK: %[[LB_C10_IDX:.*]] = fir.convert %[[LB_C10]] : (i64) -> index
-!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-!CHECK: %[[DECLM:.*]]:2 = hlfir.declare %[[M]]
-!CHECK: %[[DECLN:.*]]:2 = hlfir.declare %[[N]]
-
- !$acc enter data create(a)
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS]]#1, %[[C1]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS]]#2 : index) startIdx(%[[C1]] : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(:))
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(2:))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[LB:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(2:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(:4))
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.constant 3 : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(:4)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(6:10))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[LB:.*]] = arith.constant 5 : index
-!CHECK: %[[UB:.*]] = arith.constant 9 : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(6:10)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(n:))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-
-!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVERT1_N:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64
-!CHECK: %[[CONVERT2_N:.*]] = fir.convert %[[CONVERT1_N]] : (i64) -> index
-!CHECK: %[[LB:.*]] = arith.subi %[[CONVERT2_N]], %[[ONE]] : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS:.*]]:3 = fir.box_dims %[[DECLA]]#1, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(n:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(:m))
-!CHECK: %[[BASELB:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-
-!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVERT1_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64
-!CHECK: %[[CONVERT2_M:.*]] = fir.convert %[[CONVERT1_M]] : (i64) -> index
-!CHECK: %[[UB:.*]] = arith.subi %[[CONVERT2_M]], %[[ONE]] : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[BASELB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(:m)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(a(n:m))
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLA]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-
-!CHECK: %[[LOAD_N:.*]] = fir.load %[[DECLN]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVERT1_N:.*]] = fir.convert %[[LOAD_N]] : (i32) -> i64
-!CHECK: %[[CONVERT2_N:.*]] = fir.convert %[[CONVERT1_N]] : (i64) -> index
-!CHECK: %[[LB:.*]] = arith.subi %[[CONVERT2_N]], %[[ONE]] : index
-
-!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVERT1_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64
-!CHECK: %[[CONVERT2_M:.*]] = fir.convert %[[CONVERT1_M]] : (i64) -> index
-!CHECK: %[[UB:.*]] = arith.subi %[[CONVERT2_M]], %[[ONE]] : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLA]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[ONE]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLA]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "a(n:m)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(b(:m))
-!CHECK: %[[ZERO:.*]] = arith.constant 0 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-
-!CHECK: %[[LOAD_M:.*]] = fir.load %[[DECLM]]#0 : !fir.ref<i32>
-!CHECK: %[[CONVERT1_M:.*]] = fir.convert %[[LOAD_M]] : (i32) -> i64
-!CHECK: %[[CONVERT2_M:.*]] = fir.convert %[[CONVERT1_M]] : (i64) -> index
-!CHECK: %[[UB:.*]] = arith.subi %[[CONVERT2_M]], %[[LB_C10_IDX]] : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[DECLB]]#1, %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "b(:m)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
- !$acc enter data create(b)
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DECLB]]#0, %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS0]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS0]]#1 : index) stride(%[[DIMS0]]#2 : index) startIdx(%[[LB_C10_IDX]] : index) {strideInBytes = true}
-
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECLB]]#0 : (!fir.box<!fir.array<?xf32>>) -> !fir.ref<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xf32>> {name = "b", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<?xf32>>)
-
-end subroutine
-
-subroutine acc_enter_data_allocatable()
- real, allocatable :: a(:)
- integer, allocatable :: i
-
-!CHECK-LABEL: func.func @_QPacc_enter_data_allocatable() {
-!CHECK: %[[A:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a", uniq_name = "_QFacc_enter_data_allocatableEa"}
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[I:.*]] = fir.alloca !fir.box<!fir.heap<i32>> {bindc_name = "i", uniq_name = "_QFacc_enter_data_allocatableEi"}
-!CHECK: %[[DECLI:.*]]:2 = hlfir.declare %[[I]]
-
- !$acc enter data create(a)
-
-!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0_0:.*]] = arith.constant 0 : index
-!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0_1:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS1]]#1, %c1{{.*}} : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS1]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
-
- !$acc enter data create(a(:))
-
-!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[ZERO:.*]] = arith.constant 0 : index
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-
-!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-
-!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS2]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB:.*]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
-
- !$acc enter data create(a(2:5))
-
-!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-
-!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C2:.*]] = arith.constant 2 : index
-!CHECK: %[[LB:.*]] = arith.subi %[[C2]], %[[DIMS0]]#0 : index
-!CHECK: %[[C5:.*]] = arith.constant 5 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C5]], %[[DIMS0]]#0 : index
-!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(2:5)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
-
- !$acc enter data create(a(3:))
-
-!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-
-!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C3:.*]] = arith.constant 3 : index
-!CHECK: %[[LB:.*]] = arith.subi %[[C3]], %[[DIMS0]]#0 : index
-
-!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS2]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(3:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
-
- !$acc enter data create(a(:7))
-
-!CHECK: %[[BOX_A_0:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[ZERO:.*]] = arith.constant 0 : index
-
-!CHECK: %[[BOX_A_1:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[BOX_A_1]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[BOX_A_0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[C7:.*]] = arith.constant 7 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C7]], %[[DIMS0]]#0 : index
-!CHECK: %[[BOX_A_2:.*]] = fir.load %[[DECLA]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS2:.*]]:3 = fir.box_dims %[[BOX_A_2]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[ZERO]] : index) upperbound(%[[UB]] : index) extent(%[[DIMS2]]#1 : index) stride(%[[DIMS1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_A_0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xf32>> {name = "a(:7)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xf32>>)
-
- !$acc enter data create(i)
-
-!CHECK: %[[BOX_I:.*]] = fir.load %[[DECLI]]#0 : !fir.ref<!fir.box<!fir.heap<i32>>>
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX_I]] : (!fir.box<!fir.heap<i32>>) -> !fir.heap<i32>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<i32>) -> !fir.heap<i32> {name = "i", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<i32>)
-
-end subroutine
-
-subroutine acc_enter_data_derived_type()
- type :: dt
- real :: data
- real :: array(1:10)
- end type
-
- type :: t
- type(dt) :: d
- end type
-
- type :: z
- integer, allocatable :: data(:)
- end type
-
- type :: tt
- type(dt) :: d(10)
- end type
-
- type(dt) :: a
- type(t) :: b
- type(dt) :: aa(10)
- type(z) :: c
- type(tt) :: d
-
-!CHECK-LABEL: func.func @_QPacc_enter_data_derived_type() {
-!CHECK: %[[A:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}> {bindc_name = "a", uniq_name = "_QFacc_enter_data_derived_typeEa"}
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[AA:.*]] = fir.alloca !fir.array<10x!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>> {bindc_name = "aa", uniq_name = "_QFacc_enter_data_derived_typeEaa"}
-!CHECK: %[[DECLAA:.*]]:2 = hlfir.declare %[[AA]]
-!CHECK: %[[B:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTt{d:!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>}> {bindc_name = "b", uniq_name = "_QFacc_enter_data_derived_typeEb"}
-!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-!CHECK: %[[C:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTz{data:!fir.box<!fir.heap<!fir.array<?xi32>>>}> {bindc_name = "c", uniq_name = "_QFacc_enter_data_derived_typeEc"}
-!CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]]
-!CHECK: %[[D:.*]] = fir.alloca !fir.type<_QFacc_enter_data_derived_typeTtt{d:!fir.array<10x!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>}> {bindc_name = "d", uniq_name = "_QFacc_enter_data_derived_typeEd"}
-!CHECK: %[[DECLD:.*]]:2 = hlfir.declare %[[D]]
-
- !$acc enter data create(a%data)
-
-
-!CHECK: %[[DATA_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"data"} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>) -> !fir.ref<f32>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DATA_COORD]] : !fir.ref<f32>) -> !fir.ref<f32> {name = "a%data", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<f32>)
-
- !$acc enter data create(b%d%data)
-
-
-
-!CHECK: %[[D_COORD:.*]] = hlfir.designate %[[DECLB]]#0{"d"} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTt{d:!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>}>>) -> !fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>
-!CHECK: %[[DATA_COORD:.*]] = hlfir.designate %[[D_COORD]]{"data"} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>) -> !fir.ref<f32>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[DATA_COORD]] : !fir.ref<f32>) -> !fir.ref<f32> {name = "b%d%data", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<f32>)
-
- !$acc enter data create(a%array)
-
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
- !$acc enter data create(a%array(:))
-
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array(:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
- !$acc enter data create(a%array(1:5))
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[C0:.*]] = arith.constant 0 : index
-!CHECK: %[[C4:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[C0]] : index) upperbound(%[[C4]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array(1:5)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
- !$acc enter data create(a%array(:5))
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[C4:.*]] = arith.constant 4 : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[C4]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array(:5)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
- !$acc enter data create(a%array(2:))
-
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[DECLA]]#0{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 1 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[ONE]] : index) startIdx(%[[ONE]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a%array(2:)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
-!$acc enter data create(b%d%array)
-
-
-
-!CHECK: %[[D_COORD:.*]] = hlfir.designate %[[DECLB]]#0{"d"} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTt{d:!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>}>>) -> !fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[D_COORD]]{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "b%d%array", structured = false}
-
- !$acc enter data create(c%data)
-
-
-!CHECK: %[[DATA_COORD:.*]] = hlfir.designate %[[DECLC]]#0{"data"} {fortran_attrs = #fir.var_attrs<allocatable>} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTz{data:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-!CHECK: %[[DATA_BOX:.*]] = fir.load %[[DATA_COORD]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-!CHECK: %[[DIM0:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[DATA_BOX]], %[[DIM0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[DIM0_1:.*]] = arith.constant 0 : index
-!CHECK: %[[DIMS0_1:.*]]:3 = fir.box_dims %[[DATA_BOX]], %[[DIM0_1]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
-!CHECK: %[[UB:.*]] = arith.subi %[[DIMS0_1]]#1, %[[ONE]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%[[UB]] : index) extent(%[[DIMS0_1]]#1 : index) stride(%[[DIMS0_1]]#2 : index) startIdx(%[[DIMS0]]#0 : index) {strideInBytes = true}
-!CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DATA_BOX]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.heap<!fir.array<?xi32>> {name = "c%data", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?xi32>>)
-
- !$acc enter data create (d%d(1)%array)
-
-
-
-
-
-
-!CHECK: %[[ONE:.*]] = arith.constant 1 : index
-!CHECK: %[[D1_COORD:.*]] = hlfir.designate %[[DECLD]]#0{"d"} <%{{.*}}> (%[[ONE]]) : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTtt{d:!fir.array<10x!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>}>>, !fir.shape<1>, index) -> !fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>
-
-
-!CHECK: %[[C10:.*]] = arith.constant 10 : index
-!CHECK: %[[ARRAY_COORD:.*]] = hlfir.designate %[[D1_COORD]]{"array"} shape %{{.*}} : (!fir.ref<!fir.type<_QFacc_enter_data_derived_typeTdt{data:f32,array:!fir.array<10xf32>}>>, !fir.shape<1>) -> !fir.ref<!fir.array<10xf32>>
-!CHECK: %[[C1:.*]] = arith.constant 1 : index
-!CHECK: %[[LB:.*]] = arith.constant 0 : index
-!CHECK: %[[UB:.*]] = arith.subi %[[C10]], %[[C1]] : index
-!CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C10]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[ARRAY_COORD]] : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "d%d(1_8)%array", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.ref<!fir.array<10xf32>>)
-
-end subroutine
-
-subroutine acc_enter_data_single_array_element()
- type t1
- real, allocatable :: a(:, :)
- end type t1
- type(t1), allocatable :: e(:)
- allocate(e(10)%a(5,5))
-
- !$acc enter data create(e(2)%a(1,2))
-
-!CHECK-LABEL: func.func @_QPacc_enter_data_single_array_element() {
-!CHECK-DAG: %[[VAL_38:.*]]:3 = fir.box_dims %[[BOX:.*]], %[[VAL_37:.*]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
-!CHECK-DAG: %[[VAL_37]] = arith.constant 0 : index
-!CHECK-DAG: %[[VAL_40:.*]]:3 = fir.box_dims %[[BOX]], %[[VAL_39:.*]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
-!CHECK-DAG: %[[VAL_39]] = arith.constant 1 : index
-!CHECK-DAG: %[[VAL_41:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
-!CHECK: %[[VAL_42:.*]] = arith.constant 1 : index
-!CHECK: %[[VAL_43:.*]] = arith.constant 1 : index
-!CHECK: %[[VAL_44:.*]] = arith.subi %[[VAL_43]], %[[VAL_38]]#0 : index
-!CHECK: %[[VAL_45:.*]] = acc.bounds lowerbound(%[[VAL_44]] : index) upperbound(%[[VAL_44]] : index) extent(%[[VAL_42]] : index) stride(%[[VAL_42]] : index) startIdx(%[[VAL_38]]#0 : index)
-!CHECK: %[[VAL_46:.*]] = arith.constant 2 : index
-!CHECK: %[[VAL_47:.*]] = arith.subi %[[VAL_46]], %[[VAL_40]]#0 : index
-!CHECK: %[[VAL_48:.*]] = acc.bounds lowerbound(%[[VAL_47]] : index) upperbound(%[[VAL_47]] : index) extent(%[[VAL_42]] : index) stride(%[[VAL_42]] : index) startIdx(%[[VAL_40]]#0 : index)
-!CHECK: %[[CREATE:.*]] = acc.create varPtr(%[[VAL_41]] : !fir.heap<!fir.array<?x?xf32>>) bounds(%[[VAL_45]], %[[VAL_48]]) -> !fir.heap<!fir.array<?x?xf32>> {name = "e(2_8)%a(1,2)", structured = false}
-!CHECK: acc.enter_data dataOperands(%[[CREATE]] : !fir.heap<!fir.array<?x?xf32>>)
-
-end subroutine
diff --git a/flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90
deleted file mode 100644
index fd942173b637a..0000000000000
--- a/flang/test/Lower/OpenACC/acc-exit-data-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,107 +0,0 @@
-! This test checks lowering of OpenACC exit data directive.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-subroutine acc_exit_data
- integer :: async = 1
- real, dimension(10, 10) :: a, b, c
- real, pointer :: d
- logical :: ifCondition = .TRUE.
-
-!CHECK: %[[A:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ea"}
-!CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-!CHECK: %[[B:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Eb"}
-!CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-!CHECK: %[[C:.*]] = fir.alloca !fir.array<10x10xf32> {{{.*}}uniq_name = "{{.*}}Ec"}
-!CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]]
-!CHECK: %[[D:.*]] = fir.alloca !fir.box<!fir.ptr<f32>> {bindc_name = "d", uniq_name = "{{.*}}Ed"}
-!CHECK: %[[DECLD:.*]]:2 = hlfir.declare %[[D]]
-
- !$acc exit data delete(a)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: acc.exit_data dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
- !$acc exit data delete(a) if(.true.)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: %[[IF1:.*]] = arith.constant true
-!CHECK: acc.exit_data if(%[[IF1]]) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
- !$acc exit data delete(a) if(ifCondition)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: %[[IFCOND:.*]] = fir.load %{{.*}} : !fir.ref<!fir.logical<4>>
-!CHECK: %[[IF2:.*]] = fir.convert %[[IFCOND]] : (!fir.logical<4>) -> i1
-!CHECK: acc.exit_data if(%[[IF2]]) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>){{$}}
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
- !$acc exit data delete(a) delete(b) delete(c)
-!CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: %[[DEVPTR_B:.*]] = acc.getdeviceptr varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "b", structured = false}
-!CHECK: %[[DEVPTR_C:.*]] = acc.getdeviceptr varPtr(%[[DECLC]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "c", structured = false}
-!CHECK: acc.exit_data dataOperands(%[[DEVPTR_A]], %[[DEVPTR_B]], %[[DEVPTR_C]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>){{$}}
-!CHECK: acc.delete accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-!CHECK: acc.delete accPtr(%[[DEVPTR_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "b", structured = false}
-!CHECK: acc.delete accPtr(%[[DEVPTR_C]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "c", structured = false}
-
- !$acc exit data copyout(a) delete(b) detach(d)
-!CHECK: %[[DEVPTR_A:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_copyout>, name = "a", structured = false}
-!CHECK: %[[DEVPTR_B:.*]] = acc.getdeviceptr varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "b", structured = false}
-!CHECK: %[[BOX_D:.*]] = fir.load %[[DECLD]]#0 : !fir.ref<!fir.box<!fir.ptr<f32>>>
-!CHECK: %[[D_ADDR:.*]] = fir.box_addr %[[BOX_D]] : (!fir.box<!fir.ptr<f32>>) -> !fir.ptr<f32>
-!CHECK: %[[DEVPTR_D:.*]] = acc.getdeviceptr varPtr(%[[D_ADDR]] : !fir.ptr<f32>) -> !fir.ptr<f32> {dataClause = #acc<data_clause acc_detach>, name = "d", structured = false}
-!CHECK: acc.exit_data dataOperands(%[[DEVPTR_A]], %[[DEVPTR_B]], %[[DEVPTR_D]] : !fir.ref<!fir.array<10x10xf32>>, !fir.ref<!fir.array<10x10xf32>>, !fir.ptr<f32>)
-!CHECK: acc.copyout accPtr(%[[DEVPTR_A]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) to varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) {name = "a", structured = false}
-!CHECK: acc.delete accPtr(%[[DEVPTR_B]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "b", structured = false}
-!CHECK: acc.detach accPtr(%[[DEVPTR_D]] : !fir.ptr<f32>) {name = "d", structured = false}
-
- !$acc exit data delete(a) async
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: acc.exit_data async dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async {name = "a", structured = false}
-
- !$acc exit data delete(a) wait
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: acc.exit_data wait dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
- !$acc exit data delete(a) async wait
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: acc.exit_data async wait dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async {name = "a", structured = false}
-
- !$acc exit data delete(a) async(1)
-!CHECK: %[[ASYNC1:.*]] = arith.constant 1 : i32
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC1]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: acc.exit_data async(%[[ASYNC1]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC1]] : i32) {name = "a", structured = false}
-
-
- !$acc exit data delete(a) async(async)
-!CHECK: %[[ASYNC2:.*]] = fir.load %{{.*}} : !fir.ref<i32>
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC2]] : i32) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: acc.exit_data async(%[[ASYNC2]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) async(%[[ASYNC2]] : i32) {name = "a", structured = false}
-
- !$acc exit data delete(a) wait(1)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: %[[WAIT1:.*]] = arith.constant 1 : i32
-!CHECK: acc.exit_data wait(%[[WAIT1]] : i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
- !$acc exit data delete(a) wait(queues: 1, 2)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: %[[WAIT2:.*]] = arith.constant 1 : i32
-!CHECK: %[[WAIT3:.*]] = arith.constant 2 : i32
-!CHECK: acc.exit_data wait(%[[WAIT2]], %[[WAIT3]] : i32, i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
- !$acc exit data delete(a) wait(devnum: 1: queues: 1, 2)
-!CHECK: %[[DEVPTR:.*]] = acc.getdeviceptr varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<10x10xf32>> {dataClause = #acc<data_clause acc_delete>, name = "a", structured = false}
-!CHECK: %[[WAIT4:.*]] = arith.constant 1 : i32
-!CHECK: %[[WAIT5:.*]] = arith.constant 2 : i32
-!CHECK: %[[WAIT6:.*]] = arith.constant 1 : i32
-!CHECK: acc.exit_data wait_devnum(%[[WAIT6]] : i32) wait(%[[WAIT4]], %[[WAIT5]] : i32, i32) dataOperands(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>)
-!CHECK: acc.delete accPtr(%[[DEVPTR]] : !fir.ref<!fir.array<10x10xf32>>) bounds(%{{.*}}, %{{.*}}) {name = "a", structured = false}
-
-end subroutine acc_exit_data
diff --git a/flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90
deleted file mode 100644
index 2de7cc5761a2b..0000000000000
--- a/flang/test/Lower/OpenACC/acc-host-data-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,54 +0,0 @@
-! This test checks lowering of OpenACC host_data directive.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-subroutine acc_host_data()
- real, dimension(10) :: a
- logical :: ifCondition = .TRUE.
-
-! CHECK: %[[A:.*]] = fir.alloca !fir.array<10xf32> {bindc_name = "a", uniq_name = "_QFacc_host_dataEa"}
-! CHECK: %[[DECLA:.*]]:2 = hlfir.declare %[[A]]
-! CHECK: %[[IFCOND:.*]] = fir.address_of(@_QFacc_host_dataEifcondition) : !fir.ref<!fir.logical<4>>
-! CHECK: %[[DECLIFCOND:.*]]:2 = hlfir.declare %[[IFCOND]]
-
- !$acc host_data use_device(a)
- !$acc end host_data
-
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index)
-! CHECK: %[[DA0:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: %[[DA1:.*]] = acc.use_device varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: acc.host_data dataOperands(%[[DA0]], %[[DA1]] : !fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)
-
- !$acc host_data use_device(a) if_present
- !$acc end host_data
-
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index)
-! CHECK: %[[DA0:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: %[[DA1:.*]] = acc.use_device varPtr(%[[DECLA]]#1 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: acc.host_data dataOperands(%[[DA0]], %[[DA1]] : !fir.ref<!fir.array<10xf32>>{{.*}}) {
-! CHECK: } attributes {ifPresent}
-
- !$acc host_data use_device(a) if(ifCondition)
- !$acc end host_data
-
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index)
-! CHECK: %[[DA:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: %[[LOAD_IFCOND:.*]] = fir.load %[[DECLIFCOND]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: %[[IFCOND_I1:.*]] = fir.convert %[[LOAD_IFCOND]] : (!fir.logical<4>) -> i1
-! CHECK: acc.host_data if(%[[IFCOND_I1]]) dataOperands(%[[DA]]{{.*}} : !fir.ref<!fir.array<10xf32>>{{.*}})
-
- !$acc host_data use_device(a) if(.true.)
- !$acc end host_data
-
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%{{.*}} : index) upperbound(%{{.*}} : index) stride(%{{.*}} : index) startIdx(%{{.*}} : index)
-! CHECK: %[[DA:.*]] = acc.use_device varPtr(%[[DECLA]]#0 : !fir.ref<!fir.array<10xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<10xf32>> {name = "a"}
-! CHECK: acc.host_data dataOperands(%[[DA]]{{.*}} : !fir.ref<!fir.array<10xf32>>{{.*}})
-
- !$acc host_data use_device(a) if(.false.)
- a = 1.0
- !$acc end host_data
-
-! CHECK-NOT: acc.host_data
-! CHECK: hlfir.assign %{{.*}} to %[[DECLA]]#0
-
-end subroutine
diff --git a/flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90
deleted file mode 100644
index b1dc4e79f9f73..0000000000000
--- a/flang/test/Lower/OpenACC/acc-private-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,403 +0,0 @@
-! This test checks lowering of OpenACC private and firstprivate clauses.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-! CHECK-LABEL: acc.private.recipe @privatization_ref_10xf32 : !fir.ref<!fir.array<10xf32>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<10xf32>>):
-! CHECK: %[[C10:.*]] = arith.constant 10 : index
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[C10]] : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<10xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<10xf32>>, !fir.ref<!fir.array<10xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<10xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_box_UxUx2xi32 : !fir.box<!fir.array<?x?x2xi32>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?x?x2xi32>>):
-! CHECK: %[[DIM0:.*]]:3 = fir.box_dims %arg0, %c0{{.*}} : (!fir.box<!fir.array<?x?x2xi32>>, index) -> (index, index, index)
-! CHECK: %[[DIM1:.*]]:3 = fir.box_dims %arg0, %c1{{.*}} : (!fir.box<!fir.array<?x?x2xi32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[DIM0]]#1, %[[DIM1]]#1, %c2{{.*}} : (index, index, index) -> !fir.shape<3>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?x?x2xi32>, %[[DIM0]]#1, %[[DIM1]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?x?x2xi32>>, !fir.shape<3>) -> (!fir.box<!fir.array<?x?x2xi32>>, !fir.heap<!fir.array<?x?x2xi32>>)
-! CHECK: acc.yield %[[DECL]]#0 : !fir.box<!fir.array<?x?x2xi32>>
-! CHECK: } copy {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?x?x2xi32>>, %[[ARG1:.*]]: !fir.box<!fir.array<?x?x2xi32>>, %[[LB0:.*]]: index, %[[UB0:.*]]: index, %[[STEP0:.*]]: index, %[[LB1:.*]]: index, %[[UB1:.*]]: index, %[[STEP1:.*]]: index, %[[LB2:.*]]: index, %[[UB2:.*]]: index, %[[STEP2:.*]]: index):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}}, %{{.*}} : (index, index, index) -> !fir.shape<3>
-! CHECK: %[[DES_SRC:.*]] = hlfir.designate %[[ARG0]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]], %[[LB2]]:%[[UB2]]:%[[STEP2]]) shape %[[SHAPE]] : (!fir.box<!fir.array<?x?x2xi32>>, index, index, index, index, index, index, index, index, index, !fir.shape<3>) -> !fir.box<!fir.array<?x?x2xi32>>
-! CHECK: %[[DES_DST:.*]] = hlfir.designate %[[ARG1]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]], %[[LB2]]:%[[UB2]]:%[[STEP2]]) shape %[[SHAPE]] : (!fir.box<!fir.array<?x?x2xi32>>, index, index, index, index, index, index, index, index, index, !fir.shape<3>) -> !fir.box<!fir.array<?x?x2xi32>>
-! CHECK: hlfir.assign %[[DES_SRC]] to %[[DES_DST]] : !fir.box<!fir.array<?x?x2xi32>>, !fir.box<!fir.array<?x?x2xi32>>
-! CHECK: acc.terminator
-! CHECK: }
-
-! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_section_lb4.ub9_box_Uxi32 : !fir.box<!fir.array<?xi32>> init {
-! CHECK: ^bb0(%{{.*}}: !fir.box<!fir.array<?xi32>>):
-! CHECK: } copy {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>, %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>):
-! CHECK: %[[LB:.*]] = arith.constant 4 : index
-! CHECK: %[[UB:.*]] = arith.constant 9 : index
-! CHECK: %[[STEP:.*]] = arith.constant 1 : index
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[EXT0:.*]] = arith.subi %[[UB]], %[[LB]] : index
-! CHECK: %[[EXT1:.*]] = arith.addi %[[EXT0]], %[[C1]] : index
-! CHECK: %[[EXT2:.*]] = arith.divsi %[[EXT1]], %[[STEP]] : index
-! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[EXT2]], %[[C0]] : index
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[EXT2]], %[[C0]] : index
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[SELECT]] : (index) -> !fir.shape<1>
-! CHECK: %[[LEFT:.*]] = hlfir.designate %[[ARG0]] shape %[[SHAPE]] : (!fir.box<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: %[[RIGHT:.*]] = hlfir.designate %[[ARG1]] shape %[[SHAPE]] : (!fir.box<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: hlfir.assign %[[LEFT]] to %[[RIGHT]] : !fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>
-! CHECK: acc.terminator
-! CHECK: }
-
-! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_box_Uxi32 : !fir.box<!fir.array<?xi32>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>):
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %c0 : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xi32>, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-! CHECK: acc.yield %[[DECL]]#0 : !fir.box<!fir.array<?xi32>>
-! CHECK: } copy {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>, %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[ARG0]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %[[SHAPE]] : (!fir.box<!fir.array<?xi32>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[ARG1]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %[[SHAPE]] : (!fir.box<!fir.array<?xi32>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: hlfir.assign %[[DES_V1]] to %[[DES_V2]] : !fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>
-! CHECK: acc.terminator
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_box_UxUx2xi32 : !fir.box<!fir.array<?x?x2xi32>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?x?x2xi32>>):
-! CHECK: %[[DIM0:.*]]:3 = fir.box_dims %arg0, %c0{{.*}} : (!fir.box<!fir.array<?x?x2xi32>>, index) -> (index, index, index)
-! CHECK: %[[DIM1:.*]]:3 = fir.box_dims %arg0, %c1{{.*}} : (!fir.box<!fir.array<?x?x2xi32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[DIM0]]#1, %[[DIM1]]#1, %c2{{.*}} : (index, index, index) -> !fir.shape<3>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?x?x2xi32>, %[[DIM0]]#1, %[[DIM1]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?x?x2xi32>>, !fir.shape<3>) -> (!fir.box<!fir.array<?x?x2xi32>>, !fir.heap<!fir.array<?x?x2xi32>>)
-! CHECK: acc.yield %[[DECL]]#0 : !fir.box<!fir.array<?x?x2xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_box_ptr_Uxi32 : !fir.box<!fir.ptr<!fir.array<?xi32>>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.ptr<!fir.array<?xi32>>>):
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %arg0, %c0 : (!fir.box<!fir.ptr<!fir.array<?xi32>>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xi32>, %0#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box<!fir.array<?xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_box_heap_Uxi32 : !fir.box<!fir.heap<!fir.array<?xi32>>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.heap<!fir.array<?xi32>>>):
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xi32>, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box<!fir.array<?xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_box_Uxi32 : !fir.box<!fir.array<?xi32>> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>):
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xi32>, %0#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-! CHECK: acc.yield %[[DECLARE:.*]]#0 : !fir.box<!fir.array<?xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_section_lb50.ub99_ref_50xf32 : !fir.ref<!fir.array<50xf32>> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<50xf32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<50xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref<!fir.array<50xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<50xf32>>, !fir.ref<!fir.array<50xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<50xf32>>
-! CHECK: } copy {
-! CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.array<50xf32>>, %[[DST:.*]]: !fir.ref<!fir.array<50xf32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[DECL_SRC:.*]]:2 = hlfir.declare %[[SRC]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref<!fir.array<50xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<50xf32>>, !fir.ref<!fir.array<50xf32>>)
-! CHECK: %[[DECL_DST:.*]]:2 = hlfir.declare %[[DST]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref<!fir.array<50xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<50xf32>>, !fir.ref<!fir.array<50xf32>>)
-! CHECK: %[[DES_SRC:.*]] = hlfir.designate %[[DECL_SRC]]#0 shape %[[SHAPE:.*]] : (!fir.ref<!fir.array<50xf32>>, !fir.shape<1>) -> !fir.ref<!fir.array<50xf32>>
-! CHECK: %[[DES_DST:.*]] = hlfir.designate %[[DECL_DST]]#0 shape %[[SHAPE:.*]] : (!fir.ref<!fir.array<50xf32>>, !fir.shape<1>) -> !fir.ref<!fir.array<50xf32>>
-! CHECK: hlfir.assign %[[DES_SRC]] to %[[DES_DST]] : !fir.ref<!fir.array<50xf32>>, !fir.ref<!fir.array<50xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_section_ext100_ref_100xf32 : !fir.ref<!fir.array<100xf32>> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xf32>>
-! CHECK: } copy {
-! CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<!fir.array<100xf32>>, %[[DST:.*]]: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[DECL_SRC:.*]]:2 = hlfir.declare %[[SRC]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: %[[DECL_DST:.*]]:2 = hlfir.declare %[[DST]](%[[SHAPE]]) {uniq_name = ""} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: %[[DES_SRC:.*]] = hlfir.designate %[[DECL_SRC]]#0 shape %[[SHAPE]] : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> !fir.ref<!fir.array<100xf32>>
-! CHECK: %[[DES_DST:.*]] = hlfir.designate %[[DECL_DST]]#0 shape %[[SHAPE]] : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> !fir.ref<!fir.array<100xf32>>
-! CHECK: hlfir.assign %[[DES_SRC]] to %[[DES_DST]] : !fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>
-! CHECK: acc.terminator
-! CHECK: }
-
-! CHECK-LABEL: acc.firstprivate.recipe @firstprivatization_ref_i32 : !fir.ref<i32> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.private.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } copy {
-! CHECK: ^bb0(%[[SRC:.*]]: !fir.ref<i32>, %[[DST:.*]]: !fir.ref<i32>):
-! CHECK: %[[VALUE:.*]] = fir.load %[[SRC]] : !fir.ref<i32>
-! CHECK: fir.store %[[VALUE]] to %[[DST]] : !fir.ref<i32>
-! CHECK: acc.terminator
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_ref_50xf32 : !fir.ref<!fir.array<50xf32>> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<50xf32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<50xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref<!fir.array<50xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<50xf32>>, !fir.ref<!fir.array<50xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<50xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_ref_100xf32 : !fir.ref<!fir.array<100xf32>> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.private.init"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.private.recipe @privatization_ref_i32 : !fir.ref<i32> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.private.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: }
-
-program acc_private
- integer :: i, c
- integer, parameter :: n = 100
- real, dimension(n) :: a, b
-
-! CHECK: %[[B:.*]] = fir.address_of(@_QFEb) : !fir.ref<!fir.array<100xf32>>
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[C:.*]] = fir.alloca i32 {bindc_name = "c", uniq_name = "_QFEc"}
-! CHECK: %[[DECLC:.*]]:2 = hlfir.declare %[[C]]
-
- !$acc loop private(c)
- DO i = 1, n
- c = i
- a(i) = b(i) + c
- END DO
-
-! CHECK: %[[C_PRIVATE:.*]] = acc.private varPtr(%[[DECLC]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "c"}
-! CHECK: acc.loop private({{.*}}@privatization_ref_i32 -> %[[C_PRIVATE]] : !fir.ref<i32>{{.*}})
-! CHECK: acc.yield
-
- !$acc loop private(b)
- DO i = 1, n
- c = i
- a(i) = b(i) + c
- END DO
-
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.subi %{{.*}}, %[[C1]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[B_PRIVATE:.*]] = acc.private varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xf32>> {name = "b"}
-! CHECK: acc.loop private({{.*}}@privatization_ref_100xf32 -> %[[B_PRIVATE]] : !fir.ref<!fir.array<100xf32>>{{.*}})
-! CHECK: acc.yield
-
- !$acc loop private(b(1:50))
- DO i = 1, n
- c = i
- a(i) = b(i) + c
- END DO
-
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.constant 49 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[B_PRIVATE:.*]] = acc.private varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<50xf32>> {name = "b(1:50)"}
-! CHECK: acc.loop private({{.*}}@privatization_ref_50xf32 -> %[[B_PRIVATE]] : !fir.ref<!fir.array<50xf32>>{{.*}})
-
- !$acc parallel loop firstprivate(c)
- DO i = 1, n
- c = i
- a(i) = b(i) + c
- END DO
-
-! CHECK: %[[FP_C:.*]] = acc.firstprivate varPtr(%[[DECLC]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "c"}
-! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_ref_i32 -> %[[FP_C]] : !fir.ref<i32>)
-! CHECK: acc.yield
-
- !$acc parallel loop firstprivate(b)
- DO i = 1, n
- c = i
- a(i) = b(i) + c
- END DO
-
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.subi %{{.*}}, %[[C1]] : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xf32>> {name = "b"}
-! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_ext100_ref_100xf32 -> %[[FP_B]] : !fir.ref<!fir.array<100xf32>>)
-! CHECK: acc.yield
-
- !$acc parallel loop firstprivate(b(51:100))
- DO i = 1, n
- c = i
- a(i) = b(i) + c
- END DO
-
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 50 : index
-! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%{{.*}} : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[FP_B:.*]] = acc.firstprivate varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<50xf32>> {name = "b(51:100)"}
-! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_section_lb50.ub99_ref_50xf32 -> %[[FP_B]] : !fir.ref<!fir.array<50xf32>>)
-
-end program
-
-subroutine acc_private_assumed_shape(a, n)
- integer :: a(:), i, n
-
- !$acc parallel loop private(a)
- do i = 1, n
- a(i) = i
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_private_assumed_shape(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"}
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_private_assumed_shapeEa"} : (!fir.box<!fir.array<?xi32>>, !fir.dscope) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-! CHECK: acc.parallel {{.*}} {
-! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
-! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[ADDR]] : !fir.ref<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_Uxi32 -> %[[PRIVATE]] : !fir.ref<!fir.array<?xi32>>{{.*}})
-
-subroutine acc_private_allocatable_array(a, n)
- integer, allocatable :: a(:)
- integer :: i, n
-
- !$acc parallel loop private(a)
- do i = 1, n
- a(i) = i
- end do
-
- !$acc serial private(a)
- a(i) = 1
- !$acc end serial
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_private_allocatable_array(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {fir.bindc_name = "a"}
-! CHECK: %[[DECLA_A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFacc_private_allocatable_arrayEa"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
-! CHECK: acc.parallel {{.*}} {
-! CHECK: %[[BOX:.*]] = fir.load %[[DECLA_A]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.heap<!fir.array<?xi32>>>) -> !fir.heap<!fir.array<?xi32>>
-! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.heap<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_heap_Uxi32 -> %[[PRIVATE]] : !fir.heap<!fir.array<?xi32>>{{.*}})
-! CHECK: acc.serial private(@privatization_box_heap_Uxi32 -> %{{.*}} : !fir.heap<!fir.array<?xi32>>)
-
-subroutine acc_private_pointer_array(a, n)
- integer, pointer :: a(:)
- integer :: i, n
-
- !$acc parallel loop private(a)
- do i = 1, n
- a(i) = i
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_private_pointer_array(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>> {fir.bindc_name = "a"}, %arg1: !fir.ref<i32> {fir.bindc_name = "n"}) {
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %arg0 dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFacc_private_pointer_arrayEa"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>)
-! CHECK: acc.parallel {{.*}} {
-! CHECK: %[[BOX:.*]] = fir.load %[[DECL_A]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.ptr<!fir.array<?xi32>>>) -> !fir.ptr<!fir.array<?xi32>>
-! CHECK: %[[PRIVATE:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.ptr<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_ptr_Uxi32 -> %[[PRIVATE]] : !fir.ptr<!fir.array<?xi32>>{{.*}})
-
-subroutine acc_private_dynamic_extent(a, n)
- integer :: n, i
- integer :: a(n, n, 2)
-
- !$acc parallel loop private(a)
- do i = 1, n
- a(i, i, 1) = i
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_private_dynamic_extent(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<?x?x2xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<i32> {fir.bindc_name = "n"}) {
-! CHECK: %[[DECL_N:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_private_dynamic_extentEn"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_private_dynamic_extentEa"} : (!fir.ref<!fir.array<?x?x2xi32>>, !fir.shape<3>, !fir.dscope) -> (!fir.box<!fir.array<?x?x2xi32>>, !fir.ref<!fir.array<?x?x2xi32>>)
-! CHECK: acc.parallel {{.*}} {
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box<!fir.array<?x?x2xi32>>) -> !fir.ref<!fir.array<?x?x2xi32>>
-! CHECK: %[[PRIV:.*]] = acc.private varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?x?x2xi32>>) bounds(%{{.*}}, %{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<?x?x2xi32>> {name = "a"}
-! CHECK: acc.loop {{.*}} private({{.*}}@privatization_box_UxUx2xi32 -> %[[PRIV]] : !fir.ref<!fir.array<?x?x2xi32>>{{.*}})
-
-subroutine acc_firstprivate_assumed_shape(a, n)
- integer :: a(:), i, n
-
- !$acc parallel loop firstprivate(a)
- do i = 1, n
- a(i) = i
- end do
-end subroutine
-
-subroutine acc_firstprivate_assumed_shape_with_section(a, n)
- integer :: a(:), i, n
-
- !$acc parallel loop firstprivate(a(5:10))
- do i = 1, n
- a(i) = i
- end do
-end subroutine
-
-subroutine acc_firstprivate_dynamic_extent(a, n)
- integer :: n, i
- integer :: a(n, n, 2)
-
- !$acc parallel loop firstprivate(a)
- do i = 1, n
- a(i, i, 1) = i
- end do
-end subroutine
-
-! CHECK: acc.parallel {{.*}} firstprivate(@firstprivatization_box_UxUx2xi32 -> %{{.*}} : !fir.ref<!fir.array<?x?x2xi32>>)
-
-module acc_declare_equivalent
- integer, parameter :: n = 10
- real :: v1(n)
- real :: v2(n)
- equivalence(v1(1), v2(1))
-contains
- subroutine sub1()
- !$acc parallel private(v2)
- !$acc end parallel
- end subroutine
-end module
-
-! CHECK: acc.parallel private(@privatization_ref_10xf32 -> %{{.*}} : !fir.ref<!fir.array<10xf32>>)
-
-subroutine acc_private_use()
- integer :: i, j
-
- !$acc parallel loop
- do i = 1, 10
- j = i
- end do
-end
-
-! CHECK-LABEL: func.func @_QPacc_private_use()
-! CHECK: %[[I:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFacc_private_useEi"}
-! CHECK: %[[DECL_I:.*]]:2 = hlfir.declare %[[I]] {uniq_name = "_QFacc_private_useEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: acc.parallel
-! CHECK: %[[PRIV_I:.*]] = acc.private varPtr(%[[DECL_I]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {implicit = true, name = "i"}
-! CHECK: %[[DECL_PRIV_I:.*]]:2 = hlfir.declare %[[PRIV_I]] {uniq_name = "_QFacc_private_useEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: acc.loop {{.*}} private(@privatization_ref_i32 -> %[[PRIV_I]] : !fir.ref<i32>) control(%[[IV0:.*]] : i32) = (%c1{{.*}} : i32) to (%c10{{.*}} : i32) step (%c1{{.*}} : i32)
-! CHECK: fir.store %[[IV0]] to %[[DECL_PRIV_I]]#0 : !fir.ref<i32>
-! CHECK: %{{.*}} = fir.load %[[DECL_PRIV_I]]#0 : !fir.ref<i32>
diff --git a/flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90 b/flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90
deleted file mode 100644
index b48f530473740..0000000000000
--- a/flang/test/Lower/OpenACC/acc-reduction-unwrap-defaultbounds.f90
+++ /dev/null
@@ -1,1227 +0,0 @@
-! This test checks lowering of OpenACC reduction clause.
-
-! RUN: bbc -fopenacc -emit-hlfir --openacc-unwrap-fir-box=true --openacc-generate-default-bounds=true %s -o - | FileCheck %s
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_UxUxf32 : !fir.box<!fir.array<?x?xf32>> reduction_operator <max> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?x?xf32>>):
-! CHECK: %[[CST:.*]] = arith.constant -1.401300e-45 : f32
-! CHECK: %[[DIMS0:.*]]:3 = fir.box_dims %[[ARG0]], %c0{{.*}} : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
-! CHECK: %[[DIMS1:.*]]:3 = fir.box_dims %[[ARG0]], %c1 : (!fir.box<!fir.array<?x?xf32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[DIMS0]]#1, %[[DIMS1]]#1 : (index, index) -> !fir.shape<2>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[DIMS0]]#1, %[[DIMS1]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> (!fir.box<!fir.array<?x?xf32>>, !fir.heap<!fir.array<?x?xf32>>)
-! CHECK: hlfir.assign %[[CST]] to %[[DECL]]#0 : f32, !fir.box<!fir.array<?x?xf32>>
-! CHECK: acc.yield %[[DECL]]#0 : !fir.box<!fir.array<?x?xf32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[V1:.*]]: !fir.box<!fir.array<?x?xf32>>, %[[V2:.*]]: !fir.box<!fir.array<?x?xf32>>, %[[LB0:.*]]: index, %[[UB0:.*]]: index, %[[STEP0:.*]]: index, %[[LB1:.*]]: index, %[[UB1:.*]]: index, %[[STEP1:.*]]: index):
-
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2>
-! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[V1]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]]) shape %[[SHAPE]] : (!fir.box<!fir.array<?x?xf32>>, index, index, index, index, index, index, !fir.shape<2>) -> !fir.box<!fir.array<?x?xf32>>
-! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[V2]] (%[[LB0]]:%[[UB0]]:%[[STEP0]], %[[LB1]]:%[[UB1]]:%[[STEP1]]) shape %[[SHAPE]] : (!fir.box<!fir.array<?x?xf32>>, index, index, index, index, index, index, !fir.shape<2>) -> !fir.box<!fir.array<?x?xf32>>
-! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %[[SHAPE]] unordered : (!fir.shape<2>) -> !hlfir.expr<?x?xf32> {
-! CHECK: ^bb0(%[[ARG0:.*]]: index, %[[ARG1:.*]]: index):
-! CHECK: %[[D1:.*]] = hlfir.designate %[[DES_V1]] (%[[ARG0]], %[[ARG1]]) : (!fir.box<!fir.array<?x?xf32>>, index, index) -> !fir.ref<f32>
-! CHECK: %[[D2:.*]] = hlfir.designate %[[DES_V2]] (%[[ARG0]], %[[ARG1]]) : (!fir.box<!fir.array<?x?xf32>>, index, index) -> !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[D1]] : !fir.ref<f32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[D2]] : !fir.ref<f32>
-! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD1]], %[[LOAD2]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : f32
-! CHECK: hlfir.yield_element %[[SELECT]] : f32
-! CHECK: }
-! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[V1]] : !hlfir.expr<?x?xf32>, !fir.box<!fir.array<?x?xf32>>
-! CHECK: acc.yield %[[V1]] : !fir.box<!fir.array<?x?xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_ptr_Uxf32 : !fir.box<!fir.ptr<!fir.array<?xf32>>> reduction_operator <max> init {
-! CHECK: ^bb0(%{{.*}}: !fir.box<!fir.ptr<!fir.array<?xf32>>>):
-! CHECK: } combiner {
-! CHECK: ^bb0(%{{.*}}: !fir.box<!fir.ptr<!fir.array<?xf32>>>, %{{.*}}: !fir.box<!fir.ptr<!fir.array<?xf32>>>, %{{.*}}: index, %{{.*}}: index, %{{.*}}: index):
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_heap_Uxf32 : !fir.box<!fir.heap<!fir.array<?xf32>>> reduction_operator <max> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.heap<!fir.array<?xf32>>>):
-! CHECK: %[[CST:.*]] = arith.constant -1.401300e-45 : f32
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xf32>, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %2(%1) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xf32>>, !fir.heap<!fir.array<?xf32>>)
-! CHECK: hlfir.assign %[[CST]] to %[[DECLARE]]#0 : f32, !fir.box<!fir.array<?xf32>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box<!fir.array<?xf32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.heap<!fir.array<?xf32>>>, %[[ARG1:.*]]: !fir.box<!fir.heap<!fir.array<?xf32>>>, %[[ARG2:.*]]: index, %[[ARG3:.*]]: index, %[[ARG4:.*]]: index):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[ARG0]] (%[[ARG2]]:%[[ARG3]]:%[[ARG4]]) shape %[[SHAPE]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xf32>>>
-! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[ARG1]] (%[[ARG2]]:%[[ARG3]]:%[[ARG4]]) shape %[[SHAPE]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xf32>>>
-! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %[[SHAPE]] unordered : (!fir.shape<1>) -> !hlfir.expr<?xf32> {
-! CHECK: ^bb0(%[[IV:.*]]: index):
-! CHECK: %[[V1:.*]] = hlfir.designate %[[DES_V1]] (%[[IV]]) : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> !fir.ref<f32>
-! CHECK: %[[V2:.*]] = hlfir.designate %[[DES_V2]] (%[[IV]]) : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> !fir.ref<f32>
-! CHECK: %[[LOAD_V1:.*]] = fir.load %[[V1]] : !fir.ref<f32>
-! CHECK: %[[LOAD_V2:.*]] = fir.load %[[V2]] : !fir.ref<f32>
-! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD_V1]], %[[LOAD_V2]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD_V1]], %[[LOAD_V2]] : f32
-! CHECK: hlfir.yield_element %[[SELECT]] : f32
-! CHECK: }
-! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[ARG0]] : !hlfir.expr<?xf32>, !fir.box<!fir.heap<!fir.array<?xf32>>>
-! CHECK: acc.yield %[[ARG0]] : !fir.box<!fir.heap<!fir.array<?xf32>>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_lb1.ub3_box_Uxi32 : !fir.box<!fir.array<?xi32>> reduction_operator <add> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>):
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %c0{{.*}} : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xi32>, %0#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-! CHECK: hlfir.assign %c0{{.*}} to %[[DECLARE]]#0 : i32, !fir.box<!fir.array<?xi32>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box<!fir.array<?xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>, %[[ARG1:.*]]: !fir.box<!fir.array<?xi32>>):
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[DES1:.*]] = hlfir.designate %[[ARG0]] shape %[[SHAPE]] : (!fir.box<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: %[[DES2:.*]] = hlfir.designate %[[ARG1]] shape %[[SHAPE]] : (!fir.box<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %[[SHAPE]] unordered : (!fir.shape<1>) -> !hlfir.expr<?xi32> {
-! CHECK: ^bb0(%[[IV:.*]]: index):
-! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[DES1]] (%[[IV]]) : (!fir.box<!fir.array<?xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[DES2]] (%[[IV]]) : (!fir.box<!fir.array<?xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD_V1:.*]] = fir.load %[[DES_V1]] : !fir.ref<i32>
-! CHECK: %[[LOAD_V2:.*]] = fir.load %[[DES_V2]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD_V1]], %[[LOAD_V2]] : i32
-! CHECK: hlfir.yield_element %[[COMBINED]] : i32
-! CHECK: }
-! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[ARG0]] : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
-! CHECK: acc.yield %[[ARG0]] : !fir.box<!fir.array<?xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_box_Uxf32 : !fir.box<!fir.array<?xf32>> reduction_operator <max> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xf32>>):
-! CHECK: %[[INIT_VALUE:.*]] = arith.constant -1.401300e-45 : f32
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xf32>, %0#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xf32>>, !fir.heap<!fir.array<?xf32>>)
-! CHECK: hlfir.assign %[[INIT_VALUE]] to %[[DECLARE]]#0 : f32, !fir.box<!fir.array<?xf32>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box<!fir.array<?xf32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xf32>>, %[[ARG1:.*]]: !fir.box<!fir.array<?xf32>>
-! CHECK: %[[LEFT:.*]] = hlfir.designate %[[ARG0]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box<!fir.array<?xf32>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.array<?xf32>>
-! CHECK: %[[RIGHT:.*]] = hlfir.designate %[[ARG1]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box<!fir.array<?xf32>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.array<?xf32>>
-! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<?xf32> {
-! CHECK: ^bb0(%{{.*}}: index):
-! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[LEFT]] (%{{.*}}) : (!fir.box<!fir.array<?xf32>>, index) -> !fir.ref<f32>
-! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[RIGHT]] (%{{.*}}) : (!fir.box<!fir.array<?xf32>>, index) -> !fir.ref<f32>
-! CHECK: %[[LOAD_V1:.*]] = fir.load %[[DES_V1]] : !fir.ref<f32>
-! CHECK: %[[LOAD_V2:.*]] = fir.load %[[DES_V2]] : !fir.ref<f32>
-! CHECK: %[[CMPF:.*]] = arith.cmpf ogt, %[[LOAD_V1]], %[[LOAD_V2]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMPF]], %[[LOAD_V1]], %[[LOAD_V2]] : f32
-! CHECK: hlfir.yield_element %[[SELECT]] : f32
-! CHECK: }
-! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[ARG0]] : !hlfir.expr<?xf32>, !fir.box<!fir.array<?xf32>>
-! CHECK: acc.yield %[[ARG0]] : !fir.box<!fir.array<?xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_box_Uxi32 : !fir.box<!fir.array<?xi32>> reduction_operator <add> init {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.box<!fir.array<?xi32>>):
-! CHECK: %[[INIT_VALUE:.*]] = arith.constant 0 : i32
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[BOX_DIMS:.*]]:3 = fir.box_dims %[[ARG0]], %[[C0]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
-! CHECK: %[[SHAPE:.*]] = fir.shape %[[BOX_DIMS]]#1 : (index) -> !fir.shape<1>
-! CHECK: %[[TEMP:.*]] = fir.allocmem !fir.array<?xi32>, %[[BOX_DIMS]]#1 {bindc_name = ".tmp", uniq_name = ""}
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[TEMP]](%[[SHAPE]]) {uniq_name = ".tmp"} : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> (!fir.box<!fir.array<?xi32>>, !fir.heap<!fir.array<?xi32>>)
-! CHECK: hlfir.assign %[[INIT_VALUE]] to %[[DECLARE]]#0 : i32, !fir.box<!fir.array<?xi32>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.box<!fir.array<?xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[V1:.*]]: !fir.box<!fir.array<?xi32>>, %[[V2:.*]]: !fir.box<!fir.array<?xi32>>
-! CHECK: %[[LEFT:.*]] = hlfir.designate %[[ARG0]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box<!fir.array<?xi32>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: %[[RIGHT:.*]] = hlfir.designate %[[ARG1]] (%{{.*}}:%{{.*}}:%{{.*}}) shape %{{.*}} : (!fir.box<!fir.array<?xi32>>, index, index, index, !fir.shape<1>) -> !fir.box<!fir.array<?xi32>>
-! CHECK: %[[ELEMENTAL:.*]] = hlfir.elemental %{{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr<?xi32> {
-! CHECK: ^bb0(%{{.*}}: index):
-! CHECK: %[[DES_V1:.*]] = hlfir.designate %[[LEFT]] (%{{.*}}) : (!fir.box<!fir.array<?xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[DES_V2:.*]] = hlfir.designate %[[RIGHT]] (%{{.*}}) : (!fir.box<!fir.array<?xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD_V1:.*]] = fir.load %[[DES_V1]] : !fir.ref<i32>
-! CHECK: %[[LOAD_V2:.*]] = fir.load %[[DES_V2]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD_V1]], %[[LOAD_V2]] : i32
-! CHECK: hlfir.yield_element %[[COMBINED]] : i32
-! CHECK: }
-! CHECK: hlfir.assign %[[ELEMENTAL]] to %[[V1]] : !hlfir.expr<?xi32>, !fir.box<!fir.array<?xi32>>
-! CHECK: acc.yield %[[V1]] : !fir.box<!fir.array<?xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_mul_ref_z32 : !fir.ref<complex<f32>> reduction_operator <mul> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<complex<f32>>):
-! CHECK: %[[REAL:.*]] = arith.constant 1.000000e+00 : f32
-! CHECK: %[[IMAG:.*]] = arith.constant 0.000000e+00 : f32
-! CHECK: %[[UNDEF:.*]] = fir.undefined complex<f32>
-! CHECK: %[[UNDEF1:.*]] = fir.insert_value %[[UNDEF]], %[[REAL]], [0 : index] : (complex<f32>, f32) -> complex<f32>
-! CHECK: %[[UNDEF2:.*]] = fir.insert_value %[[UNDEF1]], %[[IMAG]], [1 : index] : (complex<f32>, f32) -> complex<f32>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca complex<f32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<complex<f32>>) -> (!fir.ref<complex<f32>>, !fir.ref<complex<f32>>)
-! CHECK: fir.store %[[UNDEF2]] to %[[DECLARE]]#0 : !fir.ref<complex<f32>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<complex<f32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<complex<f32>>, %[[ARG1:.*]]: !fir.ref<complex<f32>>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<complex<f32>>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<complex<f32>>
-! CHECK: %[[COMBINED:.*]] = fir.mulc %[[LOAD0]], %[[LOAD1]] {fastmath = #arith.fastmath<contract>} : complex<f32>
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<complex<f32>>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<complex<f32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_ref_z32 : !fir.ref<complex<f32>> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<complex<f32>>):
-! CHECK: %[[REAL:.*]] = arith.constant 0.000000e+00 : f32
-! CHECK: %[[IMAG:.*]] = arith.constant 0.000000e+00 : f32
-! CHECK: %[[UNDEF:.*]] = fir.undefined complex<f32>
-! CHECK: %[[UNDEF1:.*]] = fir.insert_value %[[UNDEF]], %[[REAL]], [0 : index] : (complex<f32>, f32) -> complex<f32>
-! CHECK: %[[UNDEF2:.*]] = fir.insert_value %[[UNDEF1]], %[[IMAG]], [1 : index] : (complex<f32>, f32) -> complex<f32>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca complex<f32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<complex<f32>>) -> (!fir.ref<complex<f32>>, !fir.ref<complex<f32>>)
-! CHECK: fir.store %[[UNDEF2]] to %[[DECLARE]]#0 : !fir.ref<complex<f32>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<complex<f32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<complex<f32>>, %[[ARG1:.*]]: !fir.ref<complex<f32>>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<complex<f32>>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<complex<f32>>
-! CHECK: %[[COMBINED:.*]] = fir.addc %[[LOAD0]], %[[LOAD1]] {fastmath = #arith.fastmath<contract>} : complex<f32>
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<complex<f32>>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<complex<f32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_neqv_ref_l32 : !fir.ref<!fir.logical<4>> reduction_operator <neqv> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[CST:.*]] = arith.constant false
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.logical<4>>, %[[ARG1:.*]]: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CMP:.*]] = arith.cmpi ne, %[[CONV0]], %[[CONV1]] : i1
-! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_eqv_ref_l32 : !fir.ref<!fir.logical<4>> reduction_operator <eqv> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[CST:.*]] = arith.constant true
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.logical<4>>, %[[ARG1:.*]]: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CMP:.*]] = arith.cmpi eq, %[[CONV0]], %[[CONV1]] : i1
-! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_lor_ref_l32 : !fir.ref<!fir.logical<4>> reduction_operator <lor> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[CST:.*]] = arith.constant false
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.logical<4>>, %[[ARG1:.*]]: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CMP:.*]] = arith.ori %[[CONV0]], %[[CONV1]] : i1
-! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_land_ref_l32 : !fir.ref<!fir.logical<4>> reduction_operator <land> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[CST:.*]] = arith.constant true
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.logical<4>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-! CHECK: %[[CONVERT:.*]] = fir.convert %[[CST]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CONVERT]] to %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.logical<4>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.logical<4>>, %[[ARG1:.*]]: !fir.ref<!fir.logical<4>>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<!fir.logical<4>>
-! CHECK: %[[CONV0:.*]] = fir.convert %[[LOAD0]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CONV1:.*]] = fir.convert %[[LOAD1]] : (!fir.logical<4>) -> i1
-! CHECK: %[[CMP:.*]] = arith.andi %[[CONV0]], %[[CONV1]] : i1
-! CHECK: %[[CMP_CONV:.*]] = fir.convert %[[CMP]] : (i1) -> !fir.logical<4>
-! CHECK: fir.store %[[CMP_CONV]] to %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.logical<4>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_xor_ref_i32 : !fir.ref<i32> reduction_operator <xor> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[CST:.*]] = arith.constant 0 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[CST]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.xori %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_ior_ref_i32 : !fir.ref<i32> reduction_operator <ior> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[CST:.*]] = arith.constant 0 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[CST]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.ori %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_iand_ref_i32 : !fir.ref<i32> reduction_operator <iand> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[CST:.*]] = arith.constant -1 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[CST]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.andi %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_section_ext100_ref_100xf32 : !fir.ref<!fir.array<100xf32>> reduction_operator <max> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[INIT:.*]] = arith.constant -1.401300e-45 : f32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] {
-! CHECK: %[[COORD:.*]] = fir.coordinate_of %[[DECLARE]]#0, %[[IV]] : (!fir.ref<!fir.array<100xf32>>, index) -> !fir.ref<f32>
-! CHECK: fir.store %[[INIT]] to %[[COORD]] : !fir.ref<f32>
-! CHECK: }
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xf32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100xf32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV0]] : (!fir.ref<!fir.array<100xf32>>, index) -> !fir.ref<f32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV0]] : (!fir.ref<!fir.array<100xf32>>, index) -> !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<f32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<f32>
-! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD1]], %[[LOAD2]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : f32
-! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref<f32>
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_ref_f32 : !fir.ref<f32> reduction_operator <max> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
-! CHECK: %[[INIT:.*]] = arith.constant -1.401300e-45 : f32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca f32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<f32>, %[[ARG1:.*]]: !fir.ref<f32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<f32>
-! CHECK: %[[CMP:.*]] = arith.cmpf ogt, %[[LOAD0]], %[[LOAD1]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : f32
-! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref<f32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<f32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_section_ext100xext10_ref_100x10xi32 : !fir.ref<!fir.array<100x10xi32>> reduction_operator <max> init {
-! CHECK: ^bb0(%arg0: !fir.ref<!fir.array<100x10xi32>>):
-! CHECK: %[[INIT:.*]] = arith.constant -2147483648 : i32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10xi32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100x10xi32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<100x10xi32>>, !fir.ref<!fir.array<100x10xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100x10xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100x10xi32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10xi32>>):
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 9 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[LB1:.*]] = arith.constant 0 : index
-! CHECK: %[[UB1:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP1:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10xi32>>, index, index) -> !fir.ref<i32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10xi32>>, index, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<i32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<i32>
-! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref<i32>
-! CHECK: }
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100x10xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_max_ref_i32 : !fir.ref<i32> reduction_operator <max> init {
-! CHECK: ^bb0(%arg0: !fir.ref<i32>):
-! CHECK: %[[INIT:.*]] = arith.constant -2147483648 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_min_section_ext100xext10_ref_100x10xf32 : !fir.ref<!fir.array<100x10xf32>> reduction_operator <min> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100x10xf32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 3.40282347E+38 : f32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100x10xf32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<100x10xf32>>, !fir.ref<!fir.array<100x10xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100x10xf32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100x10xf32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10xf32>>):
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 9 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[LB1:.*]] = arith.constant 0 : index
-! CHECK: %[[UB1:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP1:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10xf32>>, index, index) -> !fir.ref<f32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10xf32>>, index, index) -> !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<f32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<f32>
-! CHECK: %[[CMP:.*]] = arith.cmpf olt, %[[LOAD1]], %[[LOAD2]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : f32
-! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref<f32>
-! CHECK: }
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100x10xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_min_ref_f32 : !fir.ref<f32> reduction_operator <min> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
-! CHECK: %[[INIT:.*]] = arith.constant 3.40282347E+38 : f32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca f32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<f32>, %[[ARG1:.*]]: !fir.ref<f32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<f32>
-! CHECK: %[[CMP:.*]] = arith.cmpf olt, %[[LOAD0]], %[[LOAD1]] {{.*}} : f32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : f32
-! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref<f32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<f32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_min_section_ext100_ref_100xi32 : !fir.ref<!fir.array<100xi32>> reduction_operator <min> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xi32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 2147483647 : i32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100xi32>>):
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV0]] : (!fir.ref<!fir.array<100xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV0]] : (!fir.ref<!fir.array<100xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<i32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<i32>
-! CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: fir.store %[[SELECT]] to %[[COORD1]] : !fir.ref<i32>
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_min_ref_i32 : !fir.ref<i32> reduction_operator <min> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[INIT:.*]] = arith.constant 2147483647 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[CMP:.*]] = arith.cmpi slt, %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: %[[SELECT:.*]] = arith.select %[[CMP]], %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[SELECT]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_mul_ref_f32 : !fir.ref<f32> reduction_operator <mul> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
-! CHECK: %[[INIT:.*]] = arith.constant 1.000000e+00 : f32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca f32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<f32>, %[[ARG1:.*]]: !fir.ref<f32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<f32>
-! CHECK: %[[COMBINED:.*]] = arith.mulf %[[LOAD0]], %[[LOAD1]] fastmath<contract> : f32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<f32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<f32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_mul_section_ext100_ref_100xi32 : !fir.ref<!fir.array<100xi32>> reduction_operator <mul> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xi32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 1 : i32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100xi32>>):
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV]] : (!fir.ref<!fir.array<100xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV]] : (!fir.ref<!fir.array<100xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<i32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.muli %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref<i32>
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_mul_ref_i32 : !fir.ref<i32> reduction_operator <mul> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[INIT:.*]] = arith.constant 1 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.muli %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100_ref_100xf32 : !fir.ref<!fir.array<100xf32>> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 0.000000e+00 : f32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xf32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100xf32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xf32>>, !fir.ref<!fir.array<100xf32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xf32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100xf32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100xf32>>):
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV]] : (!fir.ref<!fir.array<100xf32>>, index) -> !fir.ref<f32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV]] : (!fir.ref<!fir.array<100xf32>>, index) -> !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<f32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<f32>
-! CHECK: %[[COMBINED:.*]] = arith.addf %[[LOAD1]], %[[LOAD2]] fastmath<contract> : f32
-! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref<f32>
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100xf32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_ref_f32 : !fir.ref<f32> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<f32>):
-! CHECK: %[[INIT:.*]] = arith.constant 0.000000e+00 : f32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca f32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<f32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<f32>, %[[ARG1:.*]]: !fir.ref<f32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<f32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<f32>
-! CHECK: %[[COMBINED:.*]] = arith.addf %[[LOAD0]], %[[LOAD1]] fastmath<contract> : f32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<f32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<f32>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100xext10xext2_ref_100x10x2xi32 : !fir.ref<!fir.array<100x10x2xi32>> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100x10x2xi32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 0 : i32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}}, %{{.*}} : (index, index, index) -> !fir.shape<3>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10x2xi32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100x10x2xi32>>, !fir.shape<3>) -> (!fir.ref<!fir.array<100x10x2xi32>>, !fir.ref<!fir.array<100x10x2xi32>>)
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 1 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[LB1:.*]] = arith.constant 0 : index
-! CHECK: %[[UB1:.*]] = arith.constant 9 : index
-! CHECK: %[[STEP1:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] {
-! CHECK: %[[LB2:.*]] = arith.constant 0 : index
-! CHECK: %[[UB2:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP2:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV2:.*]] = %[[LB2]] to %[[UB2]] step %[[STEP2]] {
-! CHECK: %[[COORD:.*]] = fir.coordinate_of %[[DECLARE]]#0, %[[IV2]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10x2xi32>>, index, index, index) -> !fir.ref<i32>
-! CHECK: fir.store %[[INIT]] to %[[COORD]] : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100x10x2xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100x10x2xi32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10x2xi32>>):
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 1 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[LB1:.*]] = arith.constant 0 : index
-! CHECK: %[[UB1:.*]] = arith.constant 9 : index
-! CHECK: %[[STEP1:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] {
-! CHECK: %[[LB2:.*]] = arith.constant 0 : index
-! CHECK: %[[UB2:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP2:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV2:.*]] = %[[LB2]] to %[[UB2]] step %[[STEP2]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV2]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10x2xi32>>, index, index, index) -> !fir.ref<i32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV2]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10x2xi32>>, index, index, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<i32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref<i32>
-! CHECK: }
-! CHECK: }
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100x10x2xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100xext10_ref_100x10xi32 : !fir.ref<!fir.array<100x10xi32>> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100x10xi32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 0 : i32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}}, %{{.*}} : (index, index) -> !fir.shape<2>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100x10xi32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100x10xi32>>, !fir.shape<2>) -> (!fir.ref<!fir.array<100x10xi32>>, !fir.ref<!fir.array<100x10xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100x10xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100x10xi32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10xi32>>):
-! CHECK: %[[LB0:.*]] = arith.constant 0 : index
-! CHECK: %[[UB0:.*]] = arith.constant 9 : index
-! CHECK: %[[STEP0:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV0:.*]] = %[[LB0]] to %[[UB0]] step %[[STEP0]] {
-! CHECK: %[[LB1:.*]] = arith.constant 0 : index
-! CHECK: %[[UB1:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP1:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV1:.*]] = %[[LB1]] to %[[UB1]] step %[[STEP1]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10xi32>>, index, index) -> !fir.ref<i32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV1]], %[[IV0]] : (!fir.ref<!fir.array<100x10xi32>>, index, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<i32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref<i32>
-! CHECK: }
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100x10xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_section_ext100_ref_100xi32 : !fir.ref<!fir.array<100xi32>> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<!fir.array<100xi32>>):
-! CHECK: %[[INIT:.*]] = arith.constant 0 : i32
-! CHECK: %[[SHAPE:.*]] = fir.shape %{{.*}} : (index) -> !fir.shape<1>
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.array<100xi32>
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]](%[[SHAPE]]) {uniq_name = "acc.reduction.init"} : (!fir.ref<!fir.array<100xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<100xi32>>, !fir.ref<!fir.array<100xi32>>)
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<!fir.array<100xi32>>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>>, %[[ARG1:.*]]: !fir.ref<!fir.array<100xi32>>):
-! CHECK: %[[LB:.*]] = arith.constant 0 : index
-! CHECK: %[[UB:.*]] = arith.constant 99 : index
-! CHECK: %[[STEP:.*]] = arith.constant 1 : index
-! CHECK: fir.do_loop %[[IV:.*]] = %[[LB]] to %[[UB]] step %[[STEP]] {
-! CHECK: %[[COORD1:.*]] = fir.coordinate_of %[[ARG0]], %[[IV]] : (!fir.ref<!fir.array<100xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[COORD2:.*]] = fir.coordinate_of %[[ARG1]], %[[IV]] : (!fir.ref<!fir.array<100xi32>>, index) -> !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[COORD1]] : !fir.ref<i32>
-! CHECK: %[[LOAD2:.*]] = fir.load %[[COORD2]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD1]], %[[LOAD2]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[COORD1]] : !fir.ref<i32>
-! CHECK: }
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<!fir.array<100xi32>>
-! CHECK: }
-
-! CHECK-LABEL: acc.reduction.recipe @reduction_add_ref_i32 : !fir.ref<i32> reduction_operator <add> init {
-! CHECK: ^bb0(%{{.*}}: !fir.ref<i32>):
-! CHECK: %[[INIT:.*]] = arith.constant 0 : i32
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32
-! CHECK: %[[DECLARE:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "acc.reduction.init"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK: fir.store %[[INIT]] to %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: acc.yield %[[DECLARE]]#0 : !fir.ref<i32>
-! CHECK: } combiner {
-! CHECK: ^bb0(%[[ARG0:.*]]: !fir.ref<i32>, %[[ARG1:.*]]: !fir.ref<i32>):
-! CHECK: %[[LOAD0:.*]] = fir.load %[[ARG0]] : !fir.ref<i32>
-! CHECK: %[[LOAD1:.*]] = fir.load %[[ARG1]] : !fir.ref<i32>
-! CHECK: %[[COMBINED:.*]] = arith.addi %[[LOAD0]], %[[LOAD1]] : i32
-! CHECK: fir.store %[[COMBINED]] to %[[ARG0]] : !fir.ref<i32>
-! CHECK: acc.yield %[[ARG0]] : !fir.ref<i32>
-! CHECK: }
-
-subroutine acc_reduction_add_int(a, b)
- integer :: a(100)
- integer :: i, b
-
- !$acc loop reduction(+:b)
- do i = 1, 100
- b = b + a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_int(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<i32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_i32 -> %[[RED_B]] : !fir.ref<i32>)
-
-subroutine acc_reduction_add_int_array_1d(a, b)
- integer :: a(100)
- integer :: i, b(100)
-
- !$acc loop reduction(+:b)
- do i = 1, 100
- b(i) = b(i) + a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_int_array_1d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100_ref_100xi32 -> %[[RED_B]] : !fir.ref<!fir.array<100xi32>>)
-
-subroutine acc_reduction_add_int_array_2d(a, b)
- integer :: a(100, 10), b(100, 10)
- integer :: i, j
-
- !$acc loop collapse(2) reduction(+:b)
- do i = 1, 100
- do j = 1, 10
- b(i, j) = b(i, j) + a(i, j)
- end do
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_int_array_2d(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100x10xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10xi32>> {fir.bindc_name = "b"}) {
-! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]]
-! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref<!fir.array<100x10xi32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<100x10xi32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100xext10_ref_100x10xi32 -> %[[RED_ARG1]] : !fir.ref<!fir.array<100x10xi32>>)
-! CHECK: } attributes {collapse = [2]{{.*}}
-
-subroutine acc_reduction_add_int_array_3d(a, b)
- integer :: a(100, 10, 2), b(100, 10, 2)
- integer :: i, j, k
-
- !$acc loop collapse(3) reduction(+:b)
- do i = 1, 100
- do j = 1, 10
- do k = 1, 2
- b(i, j, k) = b(i, j, k) + a(i, j, k)
- end do
- end do
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_int_array_3d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100x10x2xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10x2xi32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]]
-! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref<!fir.array<100x10x2xi32>>) bounds(%{{.*}}, %{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<100x10x2xi32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100xext10xext2_ref_100x10x2xi32 -> %[[RED_ARG1]] : !fir.ref<!fir.array<100x10x2xi32>>)
-! CHECK: } attributes {collapse = [3]{{.*}}
-
-subroutine acc_reduction_add_float(a, b)
- real :: a(100), b
- integer :: i
-
- !$acc loop reduction(+:b)
- do i = 1, 100
- b = b + a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_float(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<f32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<f32>) -> !fir.ref<f32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_add_ref_f32 -> %[[RED_B]] : !fir.ref<f32>)
-
-subroutine acc_reduction_add_float_array_1d(a, b)
- real :: a(100), b(100)
- integer :: i
-
- !$acc loop reduction(+:b)
- do i = 1, 100
- b(i) = b(i) + a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_float_array_1d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xf32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_add_section_ext100_ref_100xf32 -> %[[RED_B]] : !fir.ref<!fir.array<100xf32>>)
-
-subroutine acc_reduction_mul_int(a, b)
- integer :: a(100)
- integer :: i, b
-
- !$acc loop reduction(*:b)
- do i = 1, 100
- b = b * a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_mul_int(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<i32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_mul_ref_i32 -> %[[RED_B]] : !fir.ref<i32>)
-
-subroutine acc_reduction_mul_int_array_1d(a, b)
- integer :: a(100)
- integer :: i, b(100)
-
- !$acc loop reduction(*:b)
- do i = 1, 100
- b(i) = b(i) * a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_mul_int_array_1d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_mul_section_ext100_ref_100xi32 -> %[[RED_B]] : !fir.ref<!fir.array<100xi32>>)
-
-subroutine acc_reduction_mul_float(a, b)
- real :: a(100), b
- integer :: i
-
- !$acc loop reduction(*:b)
- do i = 1, 100
- b = b * a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_mul_float(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<f32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<f32>) -> !fir.ref<f32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_mul_ref_f32 -> %[[RED_B]] : !fir.ref<f32>)
-
-subroutine acc_reduction_mul_float_array_1d(a, b)
- real :: a(100), b(100)
- integer :: i
-
- !$acc loop reduction(*:b)
- do i = 1, 100
- b(i) = b(i) * a(i)
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_mul_float_array_1d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xf32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_mul_section_ext100_ref_100xf32 -> %[[RED_B]] : !fir.ref<!fir.array<100xf32>>)
-
-subroutine acc_reduction_min_int(a, b)
- integer :: a(100)
- integer :: i, b
-
- !$acc loop reduction(min:b)
- do i = 1, 100
- b = min(b, a(i))
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_min_int(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<i32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_min_ref_i32 -> %[[RED_B]] : !fir.ref<i32>)
-
-subroutine acc_reduction_min_int_array_1d(a, b)
- integer :: a(100), b(100)
- integer :: i
-
- !$acc loop reduction(min:b)
- do i = 1, 100
- b(i) = min(b(i), a(i))
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_min_int_array_1d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]]
-! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xi32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_min_section_ext100_ref_100xi32 -> %[[RED_ARG1]] : !fir.ref<!fir.array<100xi32>>)
-
-subroutine acc_reduction_min_float(a, b)
- real :: a(100), b
- integer :: i
-
- !$acc loop reduction(min:b)
- do i = 1, 100
- b = min(b, a(i))
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_min_float(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<f32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<f32>) -> !fir.ref<f32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_min_ref_f32 -> %[[RED_B]] : !fir.ref<f32>)
-
-subroutine acc_reduction_min_float_array2d(a, b)
- real :: a(100, 10), b(100, 10)
- integer :: i, j
-
- !$acc loop reduction(min:b) collapse(2)
- do i = 1, 100
- do j = 1, 10
- b(i, j) = min(b(i, j), a(i, j))
- end do
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_min_float_array2d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100x10xf32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10xf32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]]
-! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref<!fir.array<100x10xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<100x10xf32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_min_section_ext100xext10_ref_100x10xf32 -> %[[RED_ARG1]] : !fir.ref<!fir.array<100x10xf32>>)
-! CHECK: attributes {collapse = [2]{{.*}}
-
-subroutine acc_reduction_max_int(a, b)
- integer :: a(100)
- integer :: i, b
-
- !$acc loop reduction(max:b)
- do i = 1, 100
- b = max(b, a(i))
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_max_int(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<i32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<i32>) -> !fir.ref<i32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_max_ref_i32 -> %[[RED_B]] : !fir.ref<i32>)
-
-subroutine acc_reduction_max_int_array2d(a, b)
- integer :: a(100, 10), b(100, 10)
- integer :: i, j
-
- !$acc loop reduction(max:b) collapse(2)
- do i = 1, 100
- do j = 1, 10
- b(i, j) = max(b(i, j), a(i, j))
- end do
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_max_int_array2d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100x10xi32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100x10xi32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]]
-! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref<!fir.array<100x10xi32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<100x10xi32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_max_section_ext100xext10_ref_100x10xi32 -> %[[RED_ARG1]] : !fir.ref<!fir.array<100x10xi32>>)
-
-subroutine acc_reduction_max_float(a, b)
- real :: a(100), b
- integer :: i
-
- !$acc loop reduction(max:b)
- do i = 1, 100
- b = max(b, a(i))
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_max_float(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[B:.*]]: !fir.ref<f32> {fir.bindc_name = "b"})
-! CHECK: %[[DECLB:.*]]:2 = hlfir.declare %[[B]]
-! CHECK: %[[RED_B:.*]] = acc.reduction varPtr(%[[DECLB]]#0 : !fir.ref<f32>) -> !fir.ref<f32> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_max_ref_f32 -> %[[RED_B]] : !fir.ref<f32>)
-
-subroutine acc_reduction_max_float_array1d(a, b)
- real :: a(100), b(100)
- integer :: i
-
- !$acc loop reduction(max:b)
- do i = 1, 100
- b(i) = max(b(i), a(i))
- end do
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_max_float_array1d(
-! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.array<100xf32>> {fir.bindc_name = "b"})
-! CHECK: %[[DECLARG1:.*]]:2 = hlfir.declare %[[ARG1]]
-! CHECK: %[[RED_ARG1:.*]] = acc.reduction varPtr(%[[DECLARG1]]#0 : !fir.ref<!fir.array<100xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<100xf32>> {name = "b"}
-! CHECK: acc.loop {{.*}} reduction(@reduction_max_section_ext100_ref_100xf32 -> %[[RED_ARG1]] : !fir.ref<!fir.array<100xf32>>)
-
-subroutine acc_reduction_iand()
- integer :: i
- !$acc parallel reduction(iand:i)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_iand()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<i32>) -> !fir.ref<i32> {name = "i"}
-! CHECK: acc.parallel reduction(@reduction_iand_ref_i32 -> %[[RED]] : !fir.ref<i32>)
-
-subroutine acc_reduction_ior()
- integer :: i
- !$acc parallel reduction(ior:i)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_ior()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<i32>) -> !fir.ref<i32> {name = "i"}
-! CHECK: acc.parallel reduction(@reduction_ior_ref_i32 -> %[[RED]] : !fir.ref<i32>)
-
-subroutine acc_reduction_ieor()
- integer :: i
- !$acc parallel reduction(ieor:i)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_ieor()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<i32>) -> !fir.ref<i32> {name = "i"}
-! CHECK: acc.parallel reduction(@reduction_xor_ref_i32 -> %[[RED]] : !fir.ref<i32>)
-
-subroutine acc_reduction_and()
- logical :: l
- !$acc parallel reduction(.and.:l)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_and()
-! CHECK: %[[L:.*]] = fir.alloca !fir.logical<4> {bindc_name = "l", uniq_name = "_QFacc_reduction_andEl"}
-! CHECK: %[[DECLL:.*]]:2 = hlfir.declare %[[L]]
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[DECLL]]#0 : !fir.ref<!fir.logical<4>>) -> !fir.ref<!fir.logical<4>> {name = "l"}
-! CHECK: acc.parallel reduction(@reduction_land_ref_l32 -> %[[RED]] : !fir.ref<!fir.logical<4>>)
-
-subroutine acc_reduction_or()
- logical :: l
- !$acc parallel reduction(.or.:l)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_or()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<!fir.logical<4>>) -> !fir.ref<!fir.logical<4>> {name = "l"}
-! CHECK: acc.parallel reduction(@reduction_lor_ref_l32 -> %[[RED]] : !fir.ref<!fir.logical<4>>)
-
-subroutine acc_reduction_eqv()
- logical :: l
- !$acc parallel reduction(.eqv.:l)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_eqv()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<!fir.logical<4>>) -> !fir.ref<!fir.logical<4>> {name = "l"}
-! CHECK: acc.parallel reduction(@reduction_eqv_ref_l32 -> %[[RED]] : !fir.ref<!fir.logical<4>>)
-
-subroutine acc_reduction_neqv()
- logical :: l
- !$acc parallel reduction(.neqv.:l)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_neqv()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<!fir.logical<4>>) -> !fir.ref<!fir.logical<4>> {name = "l"}
-! CHECK: acc.parallel reduction(@reduction_neqv_ref_l32 -> %[[RED]] : !fir.ref<!fir.logical<4>>)
-
-subroutine acc_reduction_add_cmplx()
- complex :: c
- !$acc parallel reduction(+:c)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_cmplx()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<complex<f32>>) -> !fir.ref<complex<f32>> {name = "c"}
-! CHECK: acc.parallel reduction(@reduction_add_ref_z32 -> %[[RED]] : !fir.ref<complex<f32>>)
-
-subroutine acc_reduction_mul_cmplx()
- complex :: c
- !$acc parallel reduction(*:c)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_mul_cmplx()
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<complex<f32>>) -> !fir.ref<complex<f32>> {name = "c"}
-! CHECK: acc.parallel reduction(@reduction_mul_ref_z32 -> %[[RED]] : !fir.ref<complex<f32>>)
-
-subroutine acc_reduction_add_alloc()
- integer, allocatable :: i
- allocate(i)
- !$acc parallel reduction(+:i)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_alloc()
-! CHECK: %[[ALLOCA:.*]] = fir.alloca !fir.box<!fir.heap<i32>> {bindc_name = "i", uniq_name = "_QFacc_reduction_add_allocEi"}
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ALLOCA]]
-! CHECK: %[[LOAD:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.heap<i32>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.heap<i32>>) -> !fir.heap<i32>
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.heap<i32>) -> !fir.heap<i32> {name = "i"}
-! CHECK: acc.parallel reduction(@reduction_add_heap_i32 -> %[[RED]] : !fir.heap<i32>)
-
-subroutine acc_reduction_add_pointer(i)
- integer, pointer :: i
- !$acc parallel reduction(+:i)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_pointer(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.ptr<i32>>> {fir.bindc_name = "i"})
-! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]]
-! CHECK: %[[LOAD:.*]] = fir.load %[[DECLARG0]]#0 : !fir.ref<!fir.box<!fir.ptr<i32>>>
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[LOAD]] : (!fir.box<!fir.ptr<i32>>) -> !fir.ptr<i32>
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ptr<i32>) -> !fir.ptr<i32> {name = "i"}
-! CHECK: acc.parallel reduction(@reduction_add_ptr_i32 -> %[[RED]] : !fir.ptr<i32>)
-
-subroutine acc_reduction_add_static_slice(a)
- integer :: a(100)
- !$acc parallel reduction(+:a(11:20))
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_static_slice(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<100xi32>> {fir.bindc_name = "a"})
-! CHECK: %[[C100:.*]] = arith.constant 100 : index
-! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]]
-! CHECK: %[[C1:.*]] = arith.constant 1 : index
-! CHECK: %[[LB:.*]] = arith.constant 10 : index
-! CHECK: %[[UB:.*]] = arith.constant 19 : index
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%[[LB]] : index) upperbound(%[[UB]] : index) extent(%[[C100]] : index) stride(%[[C1]] : index) startIdx(%[[C1]] : index)
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[DECLARG0]]#0 : !fir.ref<!fir.array<100xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<100xi32>> {name = "a(11:20)"}
-! CHECK: acc.parallel reduction(@reduction_add_section_lb10.ub19_ref_100xi32 -> %[[RED]] : !fir.ref<!fir.array<100xi32>>)
-
-subroutine acc_reduction_add_dynamic_extent_add(a)
- integer :: a(:)
- !$acc parallel reduction(+:a)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_dynamic_extent_add(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"})
-! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]]
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<!fir.array<?xi32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<?xi32>> {name = "a"}
-! CHECK: acc.parallel reduction(@reduction_add_box_Uxi32 -> %[[RED]] : !fir.ref<!fir.array<?xi32>>)
-
-subroutine acc_reduction_add_assumed_shape_max(a)
- real :: a(:)
- !$acc parallel reduction(max:a)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_assumed_shape_max(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xf32>> {fir.bindc_name = "a"})
-! CHECK: %[[DECLARG0:.*]]:2 = hlfir.declare %[[ARG0]]
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%{{.*}} : !fir.ref<!fir.array<?xf32>>) bounds(%{{.*}}) -> !fir.ref<!fir.array<?xf32>> {name = "a"}
-! CHECK: acc.parallel reduction(@reduction_max_box_Uxf32 -> %[[RED]] : !fir.ref<!fir.array<?xf32>>) {
-
-subroutine acc_reduction_add_dynamic_extent_add_with_section(a)
- integer :: a(:)
- !$acc parallel reduction(+:a(2:4))
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_dynamic_extent_add_with_section(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "a"})
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_reduction_add_dynamic_extent_add_with_sectionEa"} : (!fir.box<!fir.array<?xi32>>, !fir.dscope) -> (!fir.box<!fir.array<?xi32>>, !fir.box<!fir.array<?xi32>>)
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c1{{.*}} : index) upperbound(%c3{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}} : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[DECL]]#0 : (!fir.box<!fir.array<?xi32>>) -> !fir.ref<!fir.array<?xi32>>
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ref<!fir.array<?xi32>>) bounds(%[[BOUND]]) -> !fir.ref<!fir.array<?xi32>> {name = "a(2:4)"}
-! CHECK: acc.parallel reduction(@reduction_add_section_lb1.ub3_box_Uxi32 -> %[[RED]] : !fir.ref<!fir.array<?xi32>>)
-
-subroutine acc_reduction_add_allocatable(a)
- real, allocatable :: a(:)
- !$acc parallel reduction(max:a)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_allocatable(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>> {fir.bindc_name = "a"})
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFacc_reduction_add_allocatableEa"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>)
-! CHECK: %[[BOX:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.heap<!fir.array<?xf32>>) bounds(%{{[0-9]+}}) -> !fir.heap<!fir.array<?xf32>> {name = "a"}
-! CHECK: acc.parallel reduction(@reduction_max_box_heap_Uxf32 -> %[[RED]] : !fir.heap<!fir.array<?xf32>>)
-
-subroutine acc_reduction_add_pointer_array(a)
- real, pointer :: a(:)
- !$acc parallel reduction(max:a)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_add_pointer_array(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>> {fir.bindc_name = "a"})
-! CHECK: %[[DECL:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{[0-9]+}} {fortran_attrs = #fir.var_attrs<pointer>, uniq_name = "_QFacc_reduction_add_pointer_arrayEa"} : (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>)
-! CHECK: %[[BOX:.*]] = fir.load %[[DECL]]#0 : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
-! CHECK: %[[BOUND:.*]] = acc.bounds lowerbound(%c0{{.*}} : index) upperbound(%{{.*}} : index) extent(%{{.*}}#1 : index) stride(%{{.*}}#2 : index) startIdx(%{{.*}}#0 : index) {strideInBytes = true}
-! CHECK: %[[BOX_ADDR:.*]] = fir.box_addr %[[BOX]] : (!fir.box<!fir.ptr<!fir.array<?xf32>>>) -> !fir.ptr<!fir.array<?xf32>>
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[BOX_ADDR]] : !fir.ptr<!fir.array<?xf32>>) bounds(%[[BOUND]]) -> !fir.ptr<!fir.array<?xf32>> {name = "a"}
-! CHECK: acc.parallel reduction(@reduction_max_box_ptr_Uxf32 -> %[[RED]] : !fir.ptr<!fir.array<?xf32>>)
-
-subroutine acc_reduction_max_dynamic_extent_max(a, n)
- integer :: n
- real :: a(n, n)
- !$acc parallel reduction(max:a)
- !$acc end parallel
-end subroutine
-
-! CHECK-LABEL: func.func @_QPacc_reduction_max_dynamic_extent_max(
-! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.array<?x?xf32>> {fir.bindc_name = "a"}, %{{.*}}: !fir.ref<i32> {fir.bindc_name = "n"})
-! CHECK: %[[DECL_A:.*]]:2 = hlfir.declare %[[ARG0]](%{{.*}}) dummy_scope %{{[0-9]+}} {uniq_name = "_QFacc_reduction_max_dynamic_extent_maxEa"} : (!fir.ref<!fir.array<?x?xf32>>, !fir.shape<2>, !fir.dscope) -> (!fir.box<!fir.array<?x?xf32>>, !fir.ref<!fir.array<?x?xf32>>)
-! CHECK: %[[ADDR:.*]] = fir.box_addr %[[DECL_A]]#0 : (!fir.box<!fir.array<?x?xf32>>) -> !fir.ref<!fir.array<?x?xf32>>
-! CHECK: %[[RED:.*]] = acc.reduction varPtr(%[[ADDR]] : !fir.ref<!fir.array<?x?xf32>>) bounds(%{{.*}}, %{{.*}}) -> !fir.ref<!fir.array<?x?xf32>> {name = "a"}
-! CHECK: acc.parallel reduction(@reduction_max_box_UxUxf32 -> %[[RED]] : !fir.ref<!fir.array<?x?xf32>>)
>From b23d6bb4ed90e263b83ace4141bcf6e97a932aa7 Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Wed, 24 Sep 2025 10:56:12 +0200
Subject: [PATCH 10/35] [libc++][C++03] cherry-pick #119801 (#158247)
---
libcxx/include/__cxx03/vector | 2 +-
.../sequences/vector.bool/small_allocator_size.pass.cpp | 2 --
2 files changed, 1 insertion(+), 3 deletions(-)
diff --git a/libcxx/include/__cxx03/vector b/libcxx/include/__cxx03/vector
index 8192ffc1a0dae..4b62e0bf33c46 100644
--- a/libcxx/include/__cxx03/vector
+++ b/libcxx/include/__cxx03/vector
@@ -1891,7 +1891,7 @@ vector<bool, _Allocator>::__recommend(size_type __new_size) const {
const size_type __cap = capacity();
if (__cap >= __ms / 2)
return __ms;
- return std::max(2 * __cap, __align_it(__new_size));
+ return std::max<size_type>(2 * __cap, __align_it(__new_size));
}
// Default constructs __n objects starting at __end_
diff --git a/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp b/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp
index 0136fb0631604..7017351d47865 100644
--- a/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp
+++ b/libcxx/test/std/containers/sequences/vector.bool/small_allocator_size.pass.cpp
@@ -9,8 +9,6 @@
// <vector>
// vector<bool>
-// XFAIL: FROZEN-CXX03-HEADERS-FIXME
-
// This test ensures that std::vector<bool> handles allocator types with small size types
// properly. Related issue: https://llvm.org/PR121713.
>From 98d01ab804c2b75f3533e4d87d0051b832658c8b Mon Sep 17 00:00:00 2001
From: Carl Ritson <carl.ritson at amd.com>
Date: Wed, 24 Sep 2025 18:30:40 +0900
Subject: [PATCH 11/35] [AMDGPU] SILowerControlFlow: ensure EXEC/SCC interval
recompute (#160459)
Ensure live intervals for EXEC and SCC are removed on all paths which
generate instructions.
---
llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 115a020f44098..8586d6c18b361 100644
--- a/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -293,7 +293,6 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
LIS->InsertMachineInstrInMaps(*SetExec);
LIS->InsertMachineInstrInMaps(*NewBr);
- LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
MI.eraseFromParent();
// FIXME: Is there a better way of adjusting the liveness? It shouldn't be
@@ -363,9 +362,6 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
RecomputeRegs.insert(SrcReg);
RecomputeRegs.insert(DstReg);
LIS->createAndComputeVirtRegInterval(SaveReg);
-
- // Let this be recomputed.
- LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
}
void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
@@ -828,7 +824,10 @@ bool SILowerControlFlow::run(MachineFunction &MF) {
optimizeEndCf();
- if (LIS) {
+ if (LIS && Changed) {
+ // These will need to be recomputed for insertions and removals.
+ LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
+ LIS->removeAllRegUnitsForPhysReg(AMDGPU::SCC);
for (Register Reg : RecomputeRegs) {
LIS->removeInterval(Reg);
LIS->createAndComputeVirtRegInterval(Reg);
>From 6a654c9ebf7af2dcfb07222d6d8d8848ddf9599a Mon Sep 17 00:00:00 2001
From: Maksim Levental <maksim.levental at gmail.com>
Date: Wed, 24 Sep 2025 05:34:58 -0400
Subject: [PATCH 12/35] [MLIR][Python] add Python wheel build demo/test
(#160388)
This PR demos and tests building Python wheels using
[scikit-build-core](https://scikit-build-core.readthedocs.io/en/latest/).
The test is added to the standalone example and thus demos the "out-of-tree"
use case, but the same `pyproject.toml` will work for in-tree builds. Note
that one can easily pair this with
[cibuildwheel](https://github.com/scikit-build/scikit-build-core/blob/326490975510b2af888e0f319292fc4a9084a033/docs/guide/build.md?plain=1#L221-L226)
to build for all Python versions, OSs, architectures, etc.
---
mlir/examples/standalone/CMakeLists.txt | 8 ++-
mlir/examples/standalone/pyproject.toml | 65 ++++++++++++++++++++
mlir/test/Examples/standalone/lit.local.cfg | 5 ++
mlir/test/Examples/standalone/test.wheel.toy | 31 ++++++++++
mlir/test/lit.site.cfg.py.in | 2 +
5 files changed, 109 insertions(+), 2 deletions(-)
create mode 100644 mlir/examples/standalone/pyproject.toml
create mode 100644 mlir/test/Examples/standalone/test.wheel.toy
diff --git a/mlir/examples/standalone/CMakeLists.txt b/mlir/examples/standalone/CMakeLists.txt
index e2bcda7fa6f0b..c6c49fde12d2e 100644
--- a/mlir/examples/standalone/CMakeLists.txt
+++ b/mlir/examples/standalone/CMakeLists.txt
@@ -63,8 +63,12 @@ if(MLIR_ENABLE_BINDINGS_PYTHON)
include(MLIRDetectPythonEnv)
mlir_configure_python_dev_packages()
# Note: for EXTERNAL_PROJECT_BUILD this must be set from the command line.
- set(MLIR_PYTHON_PACKAGE_PREFIX "mlir_standalone" CACHE STRING "" FORCE)
- set(MLIR_BINDINGS_PYTHON_INSTALL_PREFIX "python_packages/standalone/${MLIR_PYTHON_PACKAGE_PREFIX}" CACHE STRING "" FORCE)
+ if(NOT MLIR_PYTHON_PACKAGE_PREFIX)
+ set(MLIR_PYTHON_PACKAGE_PREFIX "mlir_standalone" CACHE STRING "" FORCE)
+ endif()
+ if(NOT MLIR_BINDINGS_PYTHON_INSTALL_PREFIX)
+ set(MLIR_BINDINGS_PYTHON_INSTALL_PREFIX "python_packages/standalone/${MLIR_PYTHON_PACKAGE_PREFIX}" CACHE STRING "" FORCE)
+ endif()
add_subdirectory(python)
endif()
add_subdirectory(test)
diff --git a/mlir/examples/standalone/pyproject.toml b/mlir/examples/standalone/pyproject.toml
new file mode 100644
index 0000000000000..5a1e6e86513c3
--- /dev/null
+++ b/mlir/examples/standalone/pyproject.toml
@@ -0,0 +1,65 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+# Copyright (c) 2025.
+
+[project]
+name = "standalone-python-bindings"
+dynamic = ["version"]
+requires-python = ">=3.8,<=3.14"
+dependencies = [
+ "numpy>=1.19.5, <=2.1.2",
+ "PyYAML>=5.4.0, <=6.0.1",
+ "ml_dtypes>=0.1.0, <=0.6.0; python_version<'3.13'",
+ "ml_dtypes>=0.5.0, <=0.6.0; python_version>='3.13'",
+]
+
+[project.urls]
+Homepage = "https://github.com/llvm/llvm-project"
+Discussions = "https://discourse.llvm.org/"
+"Issue Tracker" = "https://github.com/llvm/llvm-project/issues?q=is%3Aissue%20state%3Aopen%20label%3Amlir%3Apython%20"
+"Source Code" = "https://github.com/llvm/llvm-project/tree/main/mlir/python"
+
+[build-system]
+requires = [
+ "scikit-build-core>=0.10.7",
+ "typing_extensions>=4.12.2",
+ "nanobind>=2.9, <3.0",
+ "pybind11>=2.10.0, <=2.13.6",
+]
+build-backend = "scikit_build_core.build"
+
+[tool.scikit-build]
+# This is the minimum version of scikit-build-core.
+minimum-version = "0.10.7"
+# This pyproject.toml must be adjacent to the root CMakeLists.txt (wherever project(...) is specified).
+cmake.source-dir = "."
+# This is for installing/distributing the python bindings target and only the python bindings target.
+build.targets = ["StandalonePythonModules"]
+install.components = ["StandalonePythonModules"]
+
+[tool.scikit-build.cmake.define]
+# Optional
+CMAKE_C_COMPILER = { env = "CMAKE_C_COMPILER", default = "" }
+CMAKE_CXX_COMPILER = { env = "CMAKE_CXX_COMPILER", default = "" }
+CMAKE_C_COMPILER_LAUNCHER = { env = "CMAKE_C_COMPILER_LAUNCHER", default = "" }
+CMAKE_CXX_COMPILER_LAUNCHER = { env = "CMAKE_CXX_COMPILER_LAUNCHER", default = "" }
+CMAKE_GENERATOR = { env = "CMAKE_GENERATOR", default = "Ninja" }
+LLVM_USE_LINKER = { env = "LLVM_USE_LINKER", default = "" }
+# Optional but highly recommended (this makes the bindings compatible with other bindings packages
+# by preventing symbol collisions).
+CMAKE_VISIBILITY_INLINES_HIDDEN = "ON"
+CMAKE_C_VISIBILITY_PRESET = "hidden"
+CMAKE_CXX_VISIBILITY_PRESET = "hidden"
+
+# Non-optional (alternatively you could use CMAKE_PREFIX_PATH here).
+MLIR_DIR = { env = "MLIR_DIR", default = "" }
+# Non-optional
+CMAKE_BUILD_TYPE = { env = "CMAKE_BUILD_TYPE", default = "Release" }
+MLIR_ENABLE_BINDINGS_PYTHON = "ON"
+# Effectively non-optional (any downstream project should specify this).
+MLIR_PYTHON_PACKAGE_PREFIX = "mlir_standalone"
+# This specifies the directory in the install directory (i.e., /tmp/pip-wheel/platlib) where _mlir_libs, dialects, etc.
+# are installed. Thus, this will be the package location (and the name of the package) that pip assumes is
+# the root package.
+MLIR_BINDINGS_PYTHON_INSTALL_PREFIX = "mlir_standalone"
diff --git a/mlir/test/Examples/standalone/lit.local.cfg b/mlir/test/Examples/standalone/lit.local.cfg
index 3b12dcbd99e83..a566208b47bc1 100644
--- a/mlir/test/Examples/standalone/lit.local.cfg
+++ b/mlir/test/Examples/standalone/lit.local.cfg
@@ -1,3 +1,5 @@
+import os
+
# Disable with sanitizers for now, this require some more setup apparently.
for san in ["asan", "msan", "ubsan"]:
if san in config.available_features:
@@ -7,7 +9,10 @@ config.substitutions.append(("%cmake_exe", config.host_cmake))
config.substitutions.append(("%cmake_generator", config.host_cmake_generator))
config.substitutions.append(("%host_cxx", config.host_cxx))
config.substitutions.append(("%host_cc", config.host_cc))
+config.substitutions.append(("%hostc_compiler_launcher", config.host_c_compiler_launcher))
+config.substitutions.append(("%hostcxx_compiler_launcher", config.host_cxx_compiler_launcher))
config.substitutions.append(("%enable_libcxx", config.enable_libcxx))
config.substitutions.append(("%mlir_cmake_dir", config.mlir_cmake_dir))
+config.substitutions.append(("%mlir_obj_root", config.mlir_obj_root))
config.substitutions.append(("%llvm_use_linker", config.llvm_use_linker))
config.substitutions.append(("%cmake_build_type", config.cmake_build_type))
diff --git a/mlir/test/Examples/standalone/test.wheel.toy b/mlir/test/Examples/standalone/test.wheel.toy
new file mode 100644
index 0000000000000..1a439d5689939
--- /dev/null
+++ b/mlir/test/Examples/standalone/test.wheel.toy
@@ -0,0 +1,31 @@
+# There's no real issue with windows here, it's just that some CMake generated paths for targets end up being longer
+# than 255 chars when combined with the fact that pip wants to install into a tmp directory buried under
+# C/Users/ContainerAdministrator/AppData/Local/Temp.
+# UNSUPPORTED: target={{.*(windows).*}}
+
+# RUN: export CMAKE_BUILD_TYPE=%cmake_build_type
+# RUN: export CMAKE_CXX_COMPILER=%host_cxx
+# RUN: export CMAKE_CXX_COMPILER_LAUNCHER=%hostcxx_compiler_launcher
+# RUN: export CMAKE_C_COMPILER=%host_cc
+# RUN: export CMAKE_C_COMPILER_LAUNCHER=%hostc_compiler_launcher
+# RUN: export CMAKE_GENERATOR=%cmake_generator
+# RUN: export LLVM_USE_LINKER=%llvm_use_linker
+# RUN: export MLIR_DIR="%mlir_cmake_dir"
+
+# RUN: %python -m pip wheel "%mlir_src_root/examples/standalone" -w "%mlir_obj_root/wheelhouse" -v | tee %t
+
+# RUN: rm -rf "%mlir_obj_root/standalone-python-bindings-install"
+# RUN: %python -m pip install standalone_python_bindings -f "%mlir_obj_root/wheelhouse" --target "%mlir_obj_root/standalone-python-bindings-install" -v | tee -a %t
+
+# RUN: export PYTHONPATH="%mlir_obj_root/standalone-python-bindings-install"
+# RUN: %python "%mlir_src_root/examples/standalone/test/python/smoketest.py" nanobind | tee -a %t
+
+# RUN: FileCheck --input-file=%t %s
+
+# CHECK: Successfully built standalone-python-bindings
+
+# CHECK: module {
+# CHECK: %[[C2:.*]] = arith.constant 2 : i32
+# CHECK: %[[V0:.*]] = standalone.foo %[[C2]] : i32
+# CHECK: }
+
diff --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in
index 2fc595dfabbf5..940e2ad3c4365 100644
--- a/mlir/test/lit.site.cfg.py.in
+++ b/mlir/test/lit.site.cfg.py.in
@@ -15,6 +15,8 @@ config.native_target = "@LLVM_NATIVE_ARCH@"
config.host_os = "@HOST_OS@"
config.host_cc = "@HOST_CC@"
config.host_cxx = "@HOST_CXX@"
+config.host_c_compiler_launcher = "@CMAKE_C_COMPILER_LAUNCHER@"
+config.host_cxx_compiler_launcher = "@CMAKE_CXX_COMPILER_LAUNCHER@"
config.enable_libcxx = "@LLVM_ENABLE_LIBCXX@"
config.host_cmake = "@CMAKE_COMMAND@"
config.host_cmake_generator = "@CMAKE_GENERATOR@"
>From 728d83b695aa5af7feda1d75f0473e431ac7994b Mon Sep 17 00:00:00 2001
From: Carl Ritson <carl.ritson at amd.com>
Date: Wed, 24 Sep 2025 18:42:11 +0900
Subject: [PATCH 13/35] [AMDGPU] Refine GCNHazardRecognizer hasHazard()
(#138841)
Remove recursion to avoid stack overflow on large CFGs.
Avoid using the worklist for the hazard search within a single MachineBasicBlock.
Ensure predecessors are visited for all state combinations.
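
For illustration, here is a minimal, self-contained C++ sketch of the traversal
pattern this switches to (hypothetical toy types, not the LLVM classes touched
by the patch): the recursive predecessor walk becomes an explicit worklist of
(block, state) pairs, deduplicated so each block is re-scanned only for
incoming state combinations that have not been seen yet.

```cpp
// Minimal sketch (toy types): scan a block bottom-up carrying a state, then
// push predecessors onto an explicit worklist instead of recursing, keyed by
// (block, state) so each block is re-scanned once per distinct incoming state.
#include <cstdio>
#include <set>
#include <utility>
#include <vector>

struct Block {
  int Id;
  std::vector<const Block *> Preds;
  std::vector<int> Instrs; // stand-in for machine instructions
};

using State = int; // stand-in for the hazard-search state

static bool hasHazardIterative(const Block *Start, State InitialState) {
  std::vector<std::pair<const Block *, State>> Worklist{{Start, InitialState}};
  std::set<std::pair<const Block *, State>> Seen(Worklist.begin(),
                                                 Worklist.end());
  while (!Worklist.empty()) {
    auto [BB, S] = Worklist.back();
    Worklist.pop_back();

    // Scan the block in reverse, updating the state as we go.
    for (auto It = BB->Instrs.rbegin(); It != BB->Instrs.rend(); ++It) {
      if (*It == S) // pretend "instruction equals state" is the hazard
        return true;
      S += *It;     // pretend state update
    }

    // Enqueue predecessors only for (block, state) pairs not seen before, so
    // every block is still visited under every distinct state combination.
    for (const Block *Pred : BB->Preds)
      if (Seen.insert({Pred, S}).second)
        Worklist.push_back({Pred, S});
  }
  return false;
}

int main() {
  Block A{0, {}, {1, 2}};
  Block B{1, {&A}, {3}};
  std::printf("%d\n", hasHazardIterative(&B, 3) ? 1 : 0); // 1: hazard in B
  std::printf("%d\n", hasHazardIterative(&B, 5) ? 1 : 0); // 0: no hazard found
}
```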
---
.../lib/Target/AMDGPU/GCNHazardRecognizer.cpp | 137 ++++++++++++++----
1 file changed, 106 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
index 81da6325b81ba..1d9a427f2829b 100644
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -443,40 +443,101 @@ using GetNumWaitStatesFn = function_ref<unsigned int(const MachineInstr &)>;
// Search for a hazard in a block and its predecessors.
template <typename StateT>
static bool
-hasHazard(StateT State,
+hasHazard(StateT InitialState,
function_ref<HazardFnResult(StateT &, const MachineInstr &)> IsHazard,
function_ref<void(StateT &, const MachineInstr &)> UpdateState,
- const MachineBasicBlock *MBB,
- MachineBasicBlock::const_reverse_instr_iterator I,
- DenseSet<const MachineBasicBlock *> &Visited) {
- for (auto E = MBB->instr_rend(); I != E; ++I) {
- // No need to look at parent BUNDLE instructions.
- if (I->isBundle())
- continue;
-
- switch (IsHazard(State, *I)) {
- case HazardFound:
- return true;
- case HazardExpired:
- return false;
- default:
- // Continue search
- break;
+ const MachineBasicBlock *InitialMBB,
+ MachineBasicBlock::const_reverse_instr_iterator InitialI) {
+ struct StateMapKey {
+ SmallVectorImpl<StateT> *States;
+ unsigned Idx;
+ static bool isEqual(const StateMapKey &LHS, const StateMapKey &RHS) {
+ return LHS.States == RHS.States && LHS.Idx == RHS.Idx;
+ }
+ };
+ struct StateMapKeyTraits : DenseMapInfo<StateMapKey> {
+ static inline StateMapKey getEmptyKey() {
+ return {static_cast<SmallVectorImpl<StateT> *>(
+ DenseMapInfo<void *>::getEmptyKey()),
+ DenseMapInfo<unsigned>::getEmptyKey()};
+ }
+ static inline StateMapKey getTombstoneKey() {
+ return {static_cast<SmallVectorImpl<StateT> *>(
+ DenseMapInfo<void *>::getTombstoneKey()),
+ DenseMapInfo<unsigned>::getTombstoneKey()};
+ }
+ static unsigned getHashValue(const StateMapKey &Key) {
+ return StateT::getHashValue((*Key.States)[Key.Idx]);
}
+ static unsigned getHashValue(const StateT &State) {
+ return StateT::getHashValue(State);
+ }
+ static bool isEqual(const StateMapKey &LHS, const StateMapKey &RHS) {
+ const auto EKey = getEmptyKey();
+ const auto TKey = getTombstoneKey();
+ if (StateMapKey::isEqual(LHS, EKey) || StateMapKey::isEqual(RHS, EKey) ||
+ StateMapKey::isEqual(LHS, TKey) || StateMapKey::isEqual(RHS, TKey))
+ return StateMapKey::isEqual(LHS, RHS);
+ return StateT::isEqual((*LHS.States)[LHS.Idx], (*RHS.States)[RHS.Idx]);
+ }
+ static bool isEqual(const StateT &LHS, const StateMapKey &RHS) {
+ if (StateMapKey::isEqual(RHS, getEmptyKey()) ||
+ StateMapKey::isEqual(RHS, getTombstoneKey()))
+ return false;
+ return StateT::isEqual(LHS, (*RHS.States)[RHS.Idx]);
+ }
+ };
- if (I->isInlineAsm() || I->isMetaInstruction())
- continue;
+ SmallDenseMap<StateMapKey, unsigned, 8, StateMapKeyTraits> StateMap;
+ SmallVector<StateT, 8> States;
- UpdateState(State, *I);
- }
+ MachineBasicBlock::const_reverse_instr_iterator I = InitialI;
+ const MachineBasicBlock *MBB = InitialMBB;
+ StateT State = InitialState;
- for (MachineBasicBlock *Pred : MBB->predecessors()) {
- if (!Visited.insert(Pred).second)
- continue;
+ SmallSetVector<std::pair<const MachineBasicBlock *, unsigned>, 16> Worklist;
+ unsigned WorkIdx = 0;
+ for (;;) {
+ bool Expired = false;
+ for (auto E = MBB->instr_rend(); I != E; ++I) {
+ // No need to look at parent BUNDLE instructions.
+ if (I->isBundle())
+ continue;
- if (hasHazard(State, IsHazard, UpdateState, Pred, Pred->instr_rbegin(),
- Visited))
- return true;
+ auto Result = IsHazard(State, *I);
+ if (Result == HazardFound)
+ return true;
+ if (Result == HazardExpired) {
+ Expired = true;
+ break;
+ }
+
+ if (I->isInlineAsm() || I->isMetaInstruction())
+ continue;
+
+ UpdateState(State, *I);
+ }
+
+ if (!Expired) {
+ unsigned StateIdx = States.size();
+ StateMapKey Key = {&States, StateIdx};
+ auto Insertion = StateMap.insert_as(std::pair(Key, StateIdx), State);
+ if (Insertion.second) {
+ States.emplace_back(State);
+ } else {
+ StateIdx = Insertion.first->second;
+ }
+ for (MachineBasicBlock *Pred : MBB->predecessors())
+ Worklist.insert(std::pair(Pred, StateIdx));
+ }
+
+ if (WorkIdx == Worklist.size())
+ break;
+
+ unsigned StateIdx;
+ std::tie(MBB, StateIdx) = Worklist[WorkIdx++];
+ State = States[StateIdx];
+ I = MBB->instr_rbegin();
}
return false;
@@ -1641,6 +1702,15 @@ bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) {
SmallDenseMap<Register, int, 4> DefPos;
int ExecPos = std::numeric_limits<int>::max();
int VALUs = 0;
+
+ static unsigned getHashValue(const StateType &State) {
+ return hash_combine(State.ExecPos, State.VALUs,
+ hash_combine_range(State.DefPos));
+ }
+ static bool isEqual(const StateType &LHS, const StateType &RHS) {
+ return LHS.DefPos == RHS.DefPos && LHS.ExecPos == RHS.ExecPos &&
+ LHS.VALUs == RHS.VALUs;
+ }
};
StateType State;
@@ -1735,9 +1805,8 @@ bool GCNHazardRecognizer::fixVALUPartialForwardingHazard(MachineInstr *MI) {
State.VALUs += 1;
};
- DenseSet<const MachineBasicBlock *> Visited;
if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
- std::next(MI->getReverseIterator()), Visited))
+ std::next(MI->getReverseIterator())))
return false;
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
@@ -1778,6 +1847,13 @@ bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) {
struct StateType {
int VALUs = 0;
int TRANS = 0;
+
+ static unsigned getHashValue(const StateType &State) {
+ return hash_combine(State.VALUs, State.TRANS);
+ }
+ static bool isEqual(const StateType &LHS, const StateType &RHS) {
+ return LHS.VALUs == RHS.VALUs && LHS.TRANS == RHS.TRANS;
+ }
};
StateType State;
@@ -1813,9 +1889,8 @@ bool GCNHazardRecognizer::fixVALUTransUseHazard(MachineInstr *MI) {
State.TRANS += 1;
};
- DenseSet<const MachineBasicBlock *> Visited;
if (!hasHazard<StateType>(State, IsHazardFn, UpdateStateFn, MI->getParent(),
- std::next(MI->getReverseIterator()), Visited))
+ std::next(MI->getReverseIterator())))
return false;
// Hazard is observed - insert a wait on va_dst counter to ensure hazard is
>From cf03da09e280a961635eeed0cf3fb88b9a004b7e Mon Sep 17 00:00:00 2001
From: Hongyu Chen <xxs_chy at outlook.com>
Date: Wed, 24 Sep 2025 17:46:51 +0800
Subject: [PATCH 14/35] [RISCV] Disable slideup optimization on the
inconsistent element type of EVec and ContainerVT (#159373)
Fixes https://github.com/llvm/llvm-project/issues/159294
The element type of EVecContainerVT and ContainerVT can be different
after promoting integer types.
This patch disables the slideup optimization in that case.
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 +
.../RISCV/rvv/fixed-vectors-int-buildvec.ll | 317 ++++++++++++++++++
2 files changed, 325 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1ae5cb5730dc4..d56d396650a80 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4564,6 +4564,14 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
break;
}
+ // Do not slideup if the element type of EVec is different.
+ if (SlideUp) {
+ MVT EVecEltVT = EVec.getSimpleValueType().getVectorElementType();
+ MVT ContainerEltVT = ContainerVT.getVectorElementType();
+ if (EVecEltVT != ContainerEltVT)
+ SlideUp = false;
+ }
+
if (SlideUp) {
MVT EVecContainerVT = EVec.getSimpleValueType();
// Make sure the original vector has scalable vector type.
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 4bec67d91847d..ca72905a0f39b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -3597,5 +3597,322 @@ define <4 x i32> @buildvec_vredmax_slideup(<8 x i32> %arg0, <8 x i32> %arg1, <8
ret <4 x i32> %255
}
+define <16 x i16> @PR159294(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c) {
+; RV32-ONLY-LABEL: PR159294:
+; RV32-ONLY: # %bb.0: # %entry
+; RV32-ONLY-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-ONLY-NEXT: vmv.x.s a0, v8
+; RV32-ONLY-NEXT: vmv.x.s a1, v9
+; RV32-ONLY-NEXT: vmv.x.s a2, v10
+; RV32-ONLY-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-ONLY-NEXT: vmv.v.x v8, a2
+; RV32-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV32-ONLY-NEXT: vslide1down.vx v8, v8, a1
+; RV32-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV32-ONLY-NEXT: ret
+;
+; RV32VB-LABEL: PR159294:
+; RV32VB: # %bb.0: # %entry
+; RV32VB-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32VB-NEXT: vmv.x.s a0, v8
+; RV32VB-NEXT: vmv.x.s a1, v10
+; RV32VB-NEXT: slli a0, a0, 16
+; RV32VB-NEXT: zext.h a1, a1
+; RV32VB-NEXT: or a0, a1, a0
+; RV32VB-NEXT: vmv.x.s a1, v9
+; RV32VB-NEXT: vmv.v.i v8, 0
+; RV32VB-NEXT: zext.h a1, a1
+; RV32VB-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RV32VB-NEXT: vmv.s.x v8, a0
+; RV32VB-NEXT: vmv.s.x v10, a1
+; RV32VB-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RV32VB-NEXT: vslideup.vi v8, v10, 1
+; RV32VB-NEXT: ret
+;
+; RV32VB-PACK-LABEL: PR159294:
+; RV32VB-PACK: # %bb.0: # %entry
+; RV32VB-PACK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32VB-PACK-NEXT: vmv.x.s a0, v8
+; RV32VB-PACK-NEXT: vmv.x.s a1, v10
+; RV32VB-PACK-NEXT: vmv.x.s a2, v9
+; RV32VB-PACK-NEXT: pack a0, a1, a0
+; RV32VB-PACK-NEXT: pack a1, a0, a0
+; RV32VB-PACK-NEXT: vmv.v.x v8, a1
+; RV32VB-PACK-NEXT: pack a1, a2, a0
+; RV32VB-PACK-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RV32VB-PACK-NEXT: vmv.s.x v8, a0
+; RV32VB-PACK-NEXT: vmv.s.x v10, a1
+; RV32VB-PACK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RV32VB-PACK-NEXT: vslideup.vi v8, v10, 1
+; RV32VB-PACK-NEXT: ret
+;
+; RV64V-ONLY-LABEL: PR159294:
+; RV64V-ONLY: # %bb.0: # %entry
+; RV64V-ONLY-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64V-ONLY-NEXT: vmv.x.s a0, v8
+; RV64V-ONLY-NEXT: vmv.x.s a1, v9
+; RV64V-ONLY-NEXT: vmv.x.s a2, v10
+; RV64V-ONLY-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64V-ONLY-NEXT: vmv.v.x v8, a2
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a1
+; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV64V-ONLY-NEXT: ret
+;
+; RVA22U64-LABEL: PR159294:
+; RVA22U64: # %bb.0: # %entry
+; RVA22U64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVA22U64-NEXT: vmv.x.s a0, v8
+; RVA22U64-NEXT: vmv.x.s a1, v10
+; RVA22U64-NEXT: slli a0, a0, 16
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: or a0, a0, a1
+; RVA22U64-NEXT: vmv.x.s a1, v9
+; RVA22U64-NEXT: vmv.v.i v8, 0
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RVA22U64-NEXT: vmv.s.x v8, a0
+; RVA22U64-NEXT: vmv.s.x v10, a1
+; RVA22U64-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RVA22U64-NEXT: vslideup.vi v8, v10, 1
+; RVA22U64-NEXT: ret
+;
+; RVA22U64-PACK-LABEL: PR159294:
+; RVA22U64-PACK: # %bb.0: # %entry
+; RVA22U64-PACK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RVA22U64-PACK-NEXT: vmv.x.s a0, v8
+; RVA22U64-PACK-NEXT: vmv.x.s a1, v10
+; RVA22U64-PACK-NEXT: vmv.x.s a2, v9
+; RVA22U64-PACK-NEXT: packw a0, a1, a0
+; RVA22U64-PACK-NEXT: packw a1, a0, a0
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a1
+; RVA22U64-PACK-NEXT: packw a1, a2, a0
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e32, m2, tu, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: vmv.s.x v10, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e32, m1, tu, ma
+; RVA22U64-PACK-NEXT: vslideup.vi v8, v10, 1
+; RVA22U64-PACK-NEXT: ret
+;
+; RV64ZVE32-LABEL: PR159294:
+; RV64ZVE32: # %bb.0: # %entry
+; RV64ZVE32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32-NEXT: vmv.x.s a0, v8
+; RV64ZVE32-NEXT: vmv.x.s a1, v9
+; RV64ZVE32-NEXT: vmv.x.s a2, v10
+; RV64ZVE32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64ZVE32-NEXT: vmv.v.x v8, a2
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13
+; RV64ZVE32-NEXT: ret
+entry:
+ %vecext3 = extractelement <2 x i32> %a, i32 0
+ %conv4 = trunc i32 %vecext3 to i16
+ %vecinit5 = insertelement <16 x i16> <i16 0, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison, i16 poison>, i16 %conv4, i32 1
+ %vecext7 = extractelement <2 x i32> %b, i32 0
+ %conv8 = trunc i32 %vecext7 to i16
+ %vecinit9 = insertelement <16 x i16> %vecinit5, i16 %conv8, i32 2
+ %vecext59 = extractelement <2 x i32> %c, i32 0
+ %conv60 = trunc i32 %vecext59 to i16
+ %vecinit61 = insertelement <16 x i16> %vecinit9, i16 %conv60, i32 0
+ ret <16 x i16> %vecinit61
+}
+
+define <16 x i32> @PR159294_zext(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; RV32-LABEL: PR159294_zext:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: lui a2, 16
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: addi a2, a2, -1
+; RV32-NEXT: vand.vx v8, v8, a2
+; RV32-NEXT: ret
+;
+; RV64V-ONLY-LABEL: PR159294_zext:
+; RV64V-ONLY: # %bb.0: # %entry
+; RV64V-ONLY-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64V-ONLY-NEXT: vmv.x.s a0, v8
+; RV64V-ONLY-NEXT: lui a1, 16
+; RV64V-ONLY-NEXT: vmv.x.s a2, v9
+; RV64V-ONLY-NEXT: vmv.x.s a3, v10
+; RV64V-ONLY-NEXT: addi a1, a1, -1
+; RV64V-ONLY-NEXT: and a0, a0, a1
+; RV64V-ONLY-NEXT: and a2, a2, a1
+; RV64V-ONLY-NEXT: and a1, a3, a1
+; RV64V-ONLY-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64V-ONLY-NEXT: vmv.v.x v8, a1
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a2
+; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV64V-ONLY-NEXT: ret
+;
+; RVA22U64-LABEL: PR159294_zext:
+; RVA22U64: # %bb.0: # %entry
+; RVA22U64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RVA22U64-NEXT: vmv.x.s a0, v8
+; RVA22U64-NEXT: vmv.x.s a1, v10
+; RVA22U64-NEXT: slli a0, a0, 48
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: srli a0, a0, 16
+; RVA22U64-NEXT: or a0, a0, a1
+; RVA22U64-NEXT: vmv.x.s a1, v9
+; RVA22U64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-NEXT: vmv.v.i v8, 0
+; RVA22U64-NEXT: zext.h a1, a1
+; RVA22U64-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-NEXT: vmv.s.x v8, a0
+; RVA22U64-NEXT: vmv.s.x v12, a1
+; RVA22U64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-NEXT: ret
+;
+; RVA22U64-PACK-LABEL: PR159294_zext:
+; RVA22U64-PACK: # %bb.0: # %entry
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
+; RVA22U64-PACK-NEXT: vmv1r.v v12, v9
+; RVA22U64-PACK-NEXT: vmv.x.s a0, v8
+; RVA22U64-PACK-NEXT: vmv.x.s a1, v10
+; RVA22U64-PACK-NEXT: pack a2, a0, a0
+; RVA22U64-PACK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a2
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.x.s a2, v12
+; RVA22U64-PACK-NEXT: zext.h a0, a0
+; RVA22U64-PACK-NEXT: zext.h a1, a1
+; RVA22U64-PACK-NEXT: zext.h a2, a2
+; RVA22U64-PACK-NEXT: pack a0, a1, a0
+; RVA22U64-PACK-NEXT: pack a1, a2, a0
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: vmv.s.x v12, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-PACK-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-PACK-NEXT: ret
+;
+; RV64ZVE32-LABEL: PR159294_zext:
+; RV64ZVE32: # %bb.0: # %entry
+; RV64ZVE32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32-NEXT: vmv.x.s a0, v8
+; RV64ZVE32-NEXT: lui a1, 16
+; RV64ZVE32-NEXT: vmv.x.s a2, v9
+; RV64ZVE32-NEXT: vmv.x.s a3, v10
+; RV64ZVE32-NEXT: addi a1, a1, -1
+; RV64ZVE32-NEXT: and a0, a0, a1
+; RV64ZVE32-NEXT: and a2, a2, a1
+; RV64ZVE32-NEXT: and a1, a3, a1
+; RV64ZVE32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64ZVE32-NEXT: vmv.v.x v8, a1
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13
+; RV64ZVE32-NEXT: ret
+entry:
+ %vecext3 = extractelement <2 x i16> %a, i32 0
+ %conv4 = zext i16 %vecext3 to i32
+ %vecinit5 = insertelement <16 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>, i32 %conv4, i32 1
+ %vecext7 = extractelement <2 x i16> %b, i32 0
+ %conv8 = zext i16 %vecext7 to i32
+ %vecinit9 = insertelement <16 x i32> %vecinit5, i32 %conv8, i32 2
+ %vecext59 = extractelement <2 x i16> %c, i32 0
+ %conv60 = zext i16 %vecext59 to i32
+ %vecinit61 = insertelement <16 x i32> %vecinit9, i32 %conv60, i32 0
+ ret <16 x i32> %vecinit61
+}
+
+define <16 x i32> @PR159294_sext(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; RV32-LABEL: PR159294_sext:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: vmv.x.s a1, v9
+; RV32-NEXT: vmv.x.s a2, v10
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a1
+; RV32-NEXT: vslidedown.vi v8, v8, 13
+; RV32-NEXT: ret
+;
+; RV64V-ONLY-LABEL: PR159294_sext:
+; RV64V-ONLY: # %bb.0: # %entry
+; RV64V-ONLY-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64V-ONLY-NEXT: vmv.x.s a0, v8
+; RV64V-ONLY-NEXT: vmv.x.s a1, v9
+; RV64V-ONLY-NEXT: vmv.x.s a2, v10
+; RV64V-ONLY-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64V-ONLY-NEXT: vmv.v.x v8, a2
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a0
+; RV64V-ONLY-NEXT: vslide1down.vx v8, v8, a1
+; RV64V-ONLY-NEXT: vslidedown.vi v8, v8, 13
+; RV64V-ONLY-NEXT: ret
+;
+; RVA22U64-LABEL: PR159294_sext:
+; RVA22U64: # %bb.0: # %entry
+; RVA22U64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RVA22U64-NEXT: vmv.x.s a0, v8
+; RVA22U64-NEXT: vmv.x.s a1, v10
+; RVA22U64-NEXT: slli a0, a0, 32
+; RVA22U64-NEXT: add.uw a0, a1, a0
+; RVA22U64-NEXT: vmv.x.s a1, v9
+; RVA22U64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-NEXT: vmv.v.i v8, 0
+; RVA22U64-NEXT: zext.w a1, a1
+; RVA22U64-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-NEXT: vmv.s.x v8, a0
+; RVA22U64-NEXT: vmv.s.x v12, a1
+; RVA22U64-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-NEXT: ret
+;
+; RVA22U64-PACK-LABEL: PR159294_sext:
+; RVA22U64-PACK: # %bb.0: # %entry
+; RVA22U64-PACK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RVA22U64-PACK-NEXT: vmv.x.s a0, v8
+; RVA22U64-PACK-NEXT: vmv.x.s a1, v10
+; RVA22U64-PACK-NEXT: vmv.x.s a2, v9
+; RVA22U64-PACK-NEXT: pack a0, a1, a0
+; RVA22U64-PACK-NEXT: pack a1, a0, a0
+; RVA22U64-PACK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RVA22U64-PACK-NEXT: vmv.v.x v8, a1
+; RVA22U64-PACK-NEXT: pack a1, a2, a0
+; RVA22U64-PACK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
+; RVA22U64-PACK-NEXT: vmv.s.x v8, a0
+; RVA22U64-PACK-NEXT: vmv.s.x v12, a1
+; RVA22U64-PACK-NEXT: vsetivli zero, 2, e64, m1, tu, ma
+; RVA22U64-PACK-NEXT: vslideup.vi v8, v12, 1
+; RVA22U64-PACK-NEXT: ret
+;
+; RV64ZVE32-LABEL: PR159294_sext:
+; RV64ZVE32: # %bb.0: # %entry
+; RV64ZVE32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; RV64ZVE32-NEXT: vmv.x.s a0, v8
+; RV64ZVE32-NEXT: vmv.x.s a1, v9
+; RV64ZVE32-NEXT: vmv.x.s a2, v10
+; RV64ZVE32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV64ZVE32-NEXT: vmv.v.x v8, a2
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a0
+; RV64ZVE32-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32-NEXT: vslidedown.vi v8, v8, 13
+; RV64ZVE32-NEXT: ret
+entry:
+ %vecext3 = extractelement <2 x i16> %a, i32 0
+ %conv4 = sext i16 %vecext3 to i32
+ %vecinit5 = insertelement <16 x i32> <i32 0, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison, i32 poison>, i32 %conv4, i32 1
+ %vecext7 = extractelement <2 x i16> %b, i32 0
+ %conv8 = sext i16 %vecext7 to i32
+ %vecinit9 = insertelement <16 x i32> %vecinit5, i32 %conv8, i32 2
+ %vecext59 = extractelement <2 x i16> %c, i32 0
+ %conv60 = sext i16 %vecext59 to i32
+ %vecinit61 = insertelement <16 x i32> %vecinit9, i32 %conv60, i32 0
+ ret <16 x i32> %vecinit61
+}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV64: {{.*}}
>From b2f27ef2d1473884e2fd3f2b5a5cdf1a56bf2480 Mon Sep 17 00:00:00 2001
From: Alex Bradbury <asb at igalia.com>
Date: Wed, 24 Sep 2025 10:50:01 +0100
Subject: [PATCH 15/35] [RISCV] Set riscv-fpimm-cost threshold to 3 by default
(#159352)
`-riscv-fp-imm-cost` controls the threshold at which the constant pool
is used for float constants rather than generating them directly
(typically into a GPR followed by an `fmv`). The value of this knob is
the maximum number of instructions that may be used to produce the
value (otherwise we fall back to the constant pool). Upping it to 3
covers a huge number of additional constants (see
<https://github.com/llvm/llvm-project/issues/153402>), e.g. most whole
numbers, which can be generated through lui+shift+fmv. As we generally
struggle with efficient code generation for constant pool accesses,
reducing the number of constant pool accesses is beneficial. We are
typically replacing a two-instruction sequence (which includes a load)
with a three-instruction sequence (two simple arithmetic operations
plus an fmv), which avoids the constant pool load entirely.
The CHECK prefixes for various tests had to be updated to avoid
conflicts leading to check lines being dropped altogether (see
<https://github.com/llvm/llvm-project/pull/159321> for a change to
update_llc_test_checks to aid diagnosing this).
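
For a concrete feel of what the higher threshold buys, here is a minimal
sketch (simplified, assumed arithmetic; not the code changed in this patch)
of splitting a 32-bit FP bit pattern into lui/addi immediates so it can be
moved into an FP register with fmv.w.x.

```cpp
// Sketch of the immediate-splitting arithmetic behind a lui+addi+fmv.w.x
// sequence (illustration only; the backend's real materialisation logic is
// more involved). addi sign-extends its 12-bit immediate, so the upper
// 20-bit part must be rounded rather than truncated.
#include <cstdint>
#include <cstdio>

struct LuiAddi {
  uint32_t Hi20; // immediate for lui
  int32_t Lo12;  // immediate for addi, in [-2048, 2047]
};

static LuiAddi split(uint32_t Imm) {
  uint32_t Hi = (Imm + 0x800) >> 12; // round so the low part fits in 12 bits
  int32_t Lo = static_cast<int32_t>(Imm) - static_cast<int32_t>(Hi << 12);
  return {Hi, Lo};
}

int main() {
  // 0x46FFFE00 is the single-precision bit pattern of 32767.0, which shows
  // up in the updated bfloat-convert.ll checks as "lui a1, 290816" followed
  // by "addi a1, a1, -512" and an fmv.w.x.
  LuiAddi S = split(0x46FFFE00u);
  std::printf("lui %u, addi %d\n", (unsigned)S.Hi20, (int)S.Lo12);
}
```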
---
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
llvm/test/CodeGen/RISCV/bfloat-convert.ll | 219 +--
llvm/test/CodeGen/RISCV/bfloat-imm.ll | 5 +-
llvm/test/CodeGen/RISCV/calling-conv-half.ll | 284 ++--
llvm/test/CodeGen/RISCV/codemodel-lowering.ll | 106 +-
llvm/test/CodeGen/RISCV/double-convert.ll | 62 +-
llvm/test/CodeGen/RISCV/double-imm.ll | 5 +-
llvm/test/CodeGen/RISCV/double-intrinsics.ll | 30 +-
llvm/test/CodeGen/RISCV/double-round-conv.ll | 25 +-
llvm/test/CodeGen/RISCV/double-zfa.ll | 51 +-
llvm/test/CodeGen/RISCV/float-convert.ll | 62 +-
llvm/test/CodeGen/RISCV/float-imm.ll | 5 +-
.../CodeGen/RISCV/float-round-conv-sat.ll | 60 +-
llvm/test/CodeGen/RISCV/half-arith.ll | 47 +-
llvm/test/CodeGen/RISCV/half-convert.ll | 740 +++++-----
llvm/test/CodeGen/RISCV/half-imm.ll | 15 +-
llvm/test/CodeGen/RISCV/half-intrinsics.ll | 30 +-
.../test/CodeGen/RISCV/half-round-conv-sat.ll | 180 ++-
llvm/test/CodeGen/RISCV/half-round-conv.ll | 75 +-
llvm/test/CodeGen/RISCV/half-select-fcmp.ll | 32 +-
llvm/test/CodeGen/RISCV/half-zfa-fli.ll | 65 +-
llvm/test/CodeGen/RISCV/half-zfa.ll | 15 +-
.../CodeGen/RISCV/repeated-fp-divisors.ll | 5 +-
.../test/CodeGen/RISCV/rv64-double-convert.ll | 21 +-
llvm/test/CodeGen/RISCV/rv64-float-convert.ll | 12 +-
llvm/test/CodeGen/RISCV/rv64-half-convert.ll | 37 +-
llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll | 796 ++++++----
.../CodeGen/RISCV/rvv/double-round-conv.ll | 120 +-
.../RISCV/rvv/fceil-constrained-sdnode.ll | 246 ++--
llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll | 338 ++++-
.../RISCV/rvv/ffloor-constrained-sdnode.ll | 246 ++--
llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll | 338 ++++-
.../RISCV/rvv/fixed-vectors-ceil-vp.ll | 1298 +++++++++++++----
.../fixed-vectors-fceil-constrained-sdnode.ll | 246 ++--
...fixed-vectors-ffloor-constrained-sdnode.ll | 246 ++--
.../RISCV/rvv/fixed-vectors-floor-vp.ll | 1298 +++++++++++++----
...d-vectors-fnearbyint-constrained-sdnode.ll | 188 ++-
.../CodeGen/RISCV/rvv/fixed-vectors-fp.ll | 548 +++++--
...fixed-vectors-fround-constrained-sdnode.ll | 246 ++--
.../CodeGen/RISCV/rvv/fixed-vectors-fround.ll | 338 ++++-
...d-vectors-froundeven-constrained-sdnode.ll | 246 ++--
.../RISCV/rvv/fixed-vectors-froundeven.ll | 338 ++++-
...fixed-vectors-ftrunc-constrained-sdnode.ll | 222 ++-
.../RISCV/rvv/fixed-vectors-nearbyint-vp.ll | 782 ++++++----
.../RISCV/rvv/fixed-vectors-reduction-fp.ll | 1000 ++++++++-----
.../RISCV/rvv/fixed-vectors-rint-vp.ll | 698 ++++++---
.../RISCV/rvv/fixed-vectors-round-vp.ll | 1298 +++++++++++++----
.../RISCV/rvv/fixed-vectors-roundeven-vp.ll | 1298 +++++++++++++----
.../RISCV/rvv/fixed-vectors-roundtozero-vp.ll | 1298 +++++++++++++----
.../RISCV/rvv/fixed-vectors-shuffle-fp.ll | 181 ++-
llvm/test/CodeGen/RISCV/rvv/floor-vp.ll | 1298 +++++++++++++----
.../rvv/fnearbyint-constrained-sdnode.ll | 246 ++--
.../CodeGen/RISCV/rvv/fnearbyint-sdnode.ll | 338 ++++-
llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll | 298 +++-
.../RISCV/rvv/fround-constrained-sdnode.ll | 246 ++--
llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll | 338 ++++-
.../rvv/froundeven-constrained-sdnode.ll | 246 ++--
.../CodeGen/RISCV/rvv/froundeven-sdnode.ll | 338 ++++-
.../RISCV/rvv/ftrunc-constrained-sdnode.ll | 222 ++-
llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll | 298 +++-
.../test/CodeGen/RISCV/rvv/half-round-conv.ll | 60 +-
llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll | 1298 +++++++++++++----
llvm/test/CodeGen/RISCV/rvv/rint-vp.ll | 1158 ++++++++++++---
llvm/test/CodeGen/RISCV/rvv/round-vp.ll | 1298 +++++++++++++----
llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll | 1298 +++++++++++++----
llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll | 1298 +++++++++++++----
.../test/CodeGen/RISCV/rvv/vfma-vp-combine.ll | 77 +-
.../RISCV/rvv/vreductions-fp-sdnode-f16.ll | 10 +-
.../RISCV/rvv/vreductions-fp-sdnode.ll | 8 +-
.../RISCV/rvv/vreductions-fp-vp-f16.ll | 10 +-
.../RISCV/rvv/vsetvli-insert-crossbb.ll | 24 +-
llvm/test/CodeGen/RISCV/srodata.ll | 15 -
72 files changed, 19694 insertions(+), 6771 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index d56d396650a80..50649cf3caba4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -79,7 +79,7 @@ static cl::opt<int>
FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden,
cl::desc("Give the maximum number of instructions that we will "
"use for creating a floating-point immediate value"),
- cl::init(2));
+ cl::init(3));
static cl::opt<bool>
ReassocShlAddiAdd("reassoc-shl-addi-add", cl::Hidden,
diff --git a/llvm/test/CodeGen/RISCV/bfloat-convert.ll b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
index 6207a17734d62..73ff888e44b3b 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-convert.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-convert.ll
@@ -51,13 +51,14 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind {
; CHECK32ZFBFMIN-LABEL: fcvt_si_bf16_sat:
; CHECK32ZFBFMIN: # %bb.0: # %start
; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK32ZFBFMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK32ZFBFMIN-NEXT: feq.s a1, fa5, fa5
-; CHECK32ZFBFMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; CHECK32ZFBFMIN-NEXT: lui a0, 815104
-; CHECK32ZFBFMIN-NEXT: fmv.w.x fa3, a0
-; CHECK32ZFBFMIN-NEXT: fmax.s fa5, fa5, fa3
-; CHECK32ZFBFMIN-NEXT: neg a0, a1
+; CHECK32ZFBFMIN-NEXT: lui a1, 290816
+; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, a0
+; CHECK32ZFBFMIN-NEXT: feq.s a0, fa5, fa5
+; CHECK32ZFBFMIN-NEXT: addi a1, a1, -512
+; CHECK32ZFBFMIN-NEXT: neg a0, a0
+; CHECK32ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, a1
; CHECK32ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK32ZFBFMIN-NEXT: fcvt.w.s a1, fa5, rtz
; CHECK32ZFBFMIN-NEXT: and a0, a0, a1
@@ -68,12 +69,13 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind {
; RV32ID-NEXT: fmv.x.w a0, fa0
; RV32ID-NEXT: lui a1, 815104
; RV32ID-NEXT: fmv.w.x fa5, a1
-; RV32ID-NEXT: lui a1, %hi(.LCPI1_0)
+; RV32ID-NEXT: lui a1, 290816
; RV32ID-NEXT: slli a0, a0, 16
-; RV32ID-NEXT: flw fa4, %lo(.LCPI1_0)(a1)
-; RV32ID-NEXT: fmv.w.x fa3, a0
-; RV32ID-NEXT: feq.s a0, fa3, fa3
-; RV32ID-NEXT: fmax.s fa5, fa3, fa5
+; RV32ID-NEXT: addi a1, a1, -512
+; RV32ID-NEXT: fmv.w.x fa4, a0
+; RV32ID-NEXT: feq.s a0, fa4, fa4
+; RV32ID-NEXT: fmax.s fa5, fa4, fa5
+; RV32ID-NEXT: fmv.w.x fa4, a1
; RV32ID-NEXT: neg a0, a0
; RV32ID-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-NEXT: fcvt.w.s a1, fa5, rtz
@@ -83,13 +85,14 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind {
; CHECK64ZFBFMIN-LABEL: fcvt_si_bf16_sat:
; CHECK64ZFBFMIN: # %bb.0: # %start
; CHECK64ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK64ZFBFMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK64ZFBFMIN-NEXT: feq.s a1, fa5, fa5
-; CHECK64ZFBFMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; CHECK64ZFBFMIN-NEXT: lui a0, 815104
-; CHECK64ZFBFMIN-NEXT: fmv.w.x fa3, a0
-; CHECK64ZFBFMIN-NEXT: fmax.s fa5, fa5, fa3
-; CHECK64ZFBFMIN-NEXT: neg a0, a1
+; CHECK64ZFBFMIN-NEXT: lui a1, 290816
+; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, a0
+; CHECK64ZFBFMIN-NEXT: feq.s a0, fa5, fa5
+; CHECK64ZFBFMIN-NEXT: addi a1, a1, -512
+; CHECK64ZFBFMIN-NEXT: neg a0, a0
+; CHECK64ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, a1
; CHECK64ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK64ZFBFMIN-NEXT: fcvt.l.s a1, fa5, rtz
; CHECK64ZFBFMIN-NEXT: and a0, a0, a1
@@ -100,12 +103,13 @@ define i16 @fcvt_si_bf16_sat(bfloat %a) nounwind {
; RV64ID-NEXT: fmv.x.w a0, fa0
; RV64ID-NEXT: lui a1, 815104
; RV64ID-NEXT: fmv.w.x fa5, a1
-; RV64ID-NEXT: lui a1, %hi(.LCPI1_0)
+; RV64ID-NEXT: lui a1, 290816
; RV64ID-NEXT: slli a0, a0, 16
-; RV64ID-NEXT: flw fa4, %lo(.LCPI1_0)(a1)
-; RV64ID-NEXT: fmv.w.x fa3, a0
-; RV64ID-NEXT: feq.s a0, fa3, fa3
-; RV64ID-NEXT: fmax.s fa5, fa3, fa5
+; RV64ID-NEXT: addi a1, a1, -512
+; RV64ID-NEXT: fmv.w.x fa4, a0
+; RV64ID-NEXT: feq.s a0, fa4, fa4
+; RV64ID-NEXT: fmax.s fa5, fa4, fa5
+; RV64ID-NEXT: fmv.w.x fa4, a1
; RV64ID-NEXT: neg a0, a0
; RV64ID-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-NEXT: fcvt.l.s a1, fa5, rtz
@@ -152,49 +156,53 @@ define i16 @fcvt_ui_bf16(bfloat %a) nounwind {
define i16 @fcvt_ui_bf16_sat(bfloat %a) nounwind {
; CHECK32ZFBFMIN-LABEL: fcvt_ui_bf16_sat:
; CHECK32ZFBFMIN: # %bb.0: # %start
-; CHECK32ZFBFMIN-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK32ZFBFMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa4, fa0
-; CHECK32ZFBFMIN-NEXT: fmv.w.x fa3, zero
-; CHECK32ZFBFMIN-NEXT: fmax.s fa4, fa4, fa3
-; CHECK32ZFBFMIN-NEXT: fmin.s fa5, fa4, fa5
+; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, zero
+; CHECK32ZFBFMIN-NEXT: lui a0, 292864
+; CHECK32ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK32ZFBFMIN-NEXT: addi a0, a0, -256
+; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, a0
+; CHECK32ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK32ZFBFMIN-NEXT: fcvt.wu.s a0, fa5, rtz
; CHECK32ZFBFMIN-NEXT: ret
;
; RV32ID-LABEL: fcvt_ui_bf16_sat:
; RV32ID: # %bb.0: # %start
-; RV32ID-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
; RV32ID-NEXT: fmv.x.w a0, fa0
+; RV32ID-NEXT: fmv.w.x fa5, zero
; RV32ID-NEXT: slli a0, a0, 16
; RV32ID-NEXT: fmv.w.x fa4, a0
-; RV32ID-NEXT: fmv.w.x fa3, zero
-; RV32ID-NEXT: fmax.s fa4, fa4, fa3
-; RV32ID-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-NEXT: lui a0, 292864
+; RV32ID-NEXT: addi a0, a0, -256
+; RV32ID-NEXT: fmax.s fa5, fa4, fa5
+; RV32ID-NEXT: fmv.w.x fa4, a0
+; RV32ID-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-NEXT: ret
;
; CHECK64ZFBFMIN-LABEL: fcvt_ui_bf16_sat:
; CHECK64ZFBFMIN: # %bb.0: # %start
-; CHECK64ZFBFMIN-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK64ZFBFMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; CHECK64ZFBFMIN-NEXT: fcvt.s.bf16 fa4, fa0
-; CHECK64ZFBFMIN-NEXT: fmv.w.x fa3, zero
-; CHECK64ZFBFMIN-NEXT: fmax.s fa4, fa4, fa3
-; CHECK64ZFBFMIN-NEXT: fmin.s fa5, fa4, fa5
+; CHECK64ZFBFMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, zero
+; CHECK64ZFBFMIN-NEXT: lui a0, 292864
+; CHECK64ZFBFMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK64ZFBFMIN-NEXT: addi a0, a0, -256
+; CHECK64ZFBFMIN-NEXT: fmv.w.x fa4, a0
+; CHECK64ZFBFMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK64ZFBFMIN-NEXT: fcvt.lu.s a0, fa5, rtz
; CHECK64ZFBFMIN-NEXT: ret
;
; RV64ID-LABEL: fcvt_ui_bf16_sat:
; RV64ID: # %bb.0: # %start
-; RV64ID-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
; RV64ID-NEXT: fmv.x.w a0, fa0
+; RV64ID-NEXT: fmv.w.x fa5, zero
; RV64ID-NEXT: slli a0, a0, 16
; RV64ID-NEXT: fmv.w.x fa4, a0
-; RV64ID-NEXT: fmv.w.x fa3, zero
-; RV64ID-NEXT: fmax.s fa4, fa4, fa3
-; RV64ID-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-NEXT: lui a0, 292864
+; RV64ID-NEXT: addi a0, a0, -256
+; RV64ID-NEXT: fmax.s fa5, fa4, fa5
+; RV64ID-NEXT: fmv.w.x fa4, a0
+; RV64ID-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-NEXT: ret
start:
@@ -472,20 +480,21 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
; RV32IZFBFMIN-NEXT: # %bb.1: # %start
; RV32IZFBFMIN-NEXT: mv a2, a1
; RV32IZFBFMIN-NEXT: .LBB10_2: # %start
-; RV32IZFBFMIN-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32IZFBFMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32IZFBFMIN-NEXT: lui a1, 389120
+; RV32IZFBFMIN-NEXT: addi a1, a1, -1
+; RV32IZFBFMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFBFMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFBFMIN-NEXT: beqz a1, .LBB10_4
; RV32IZFBFMIN-NEXT: # %bb.3:
; RV32IZFBFMIN-NEXT: addi a2, a3, -1
; RV32IZFBFMIN-NEXT: .LBB10_4: # %start
; RV32IZFBFMIN-NEXT: feq.s a3, fs0, fs0
-; RV32IZFBFMIN-NEXT: neg a4, a1
-; RV32IZFBFMIN-NEXT: neg a1, s0
+; RV32IZFBFMIN-NEXT: neg a4, s0
+; RV32IZFBFMIN-NEXT: neg a5, a1
; RV32IZFBFMIN-NEXT: neg a3, a3
-; RV32IZFBFMIN-NEXT: and a0, a1, a0
+; RV32IZFBFMIN-NEXT: and a0, a4, a0
; RV32IZFBFMIN-NEXT: and a1, a3, a2
-; RV32IZFBFMIN-NEXT: or a0, a4, a0
+; RV32IZFBFMIN-NEXT: or a0, a5, a0
; RV32IZFBFMIN-NEXT: and a0, a3, a0
; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -511,20 +520,21 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
; R32IDZFBFMIN-NEXT: # %bb.1: # %start
; R32IDZFBFMIN-NEXT: mv a2, a1
; R32IDZFBFMIN-NEXT: .LBB10_2: # %start
-; R32IDZFBFMIN-NEXT: lui a1, %hi(.LCPI10_0)
-; R32IDZFBFMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; R32IDZFBFMIN-NEXT: lui a1, 389120
+; R32IDZFBFMIN-NEXT: addi a1, a1, -1
+; R32IDZFBFMIN-NEXT: fmv.w.x fa5, a1
; R32IDZFBFMIN-NEXT: flt.s a1, fa5, fs0
; R32IDZFBFMIN-NEXT: beqz a1, .LBB10_4
; R32IDZFBFMIN-NEXT: # %bb.3:
; R32IDZFBFMIN-NEXT: addi a2, a3, -1
; R32IDZFBFMIN-NEXT: .LBB10_4: # %start
; R32IDZFBFMIN-NEXT: feq.s a3, fs0, fs0
-; R32IDZFBFMIN-NEXT: neg a4, a1
-; R32IDZFBFMIN-NEXT: neg a1, s0
+; R32IDZFBFMIN-NEXT: neg a4, s0
+; R32IDZFBFMIN-NEXT: neg a5, a1
; R32IDZFBFMIN-NEXT: neg a3, a3
-; R32IDZFBFMIN-NEXT: and a0, a1, a0
+; R32IDZFBFMIN-NEXT: and a0, a4, a0
; R32IDZFBFMIN-NEXT: and a1, a3, a2
-; R32IDZFBFMIN-NEXT: or a0, a4, a0
+; R32IDZFBFMIN-NEXT: or a0, a5, a0
; R32IDZFBFMIN-NEXT: and a0, a3, a0
; R32IDZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; R32IDZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -552,8 +562,9 @@ define i64 @fcvt_l_bf16_sat(bfloat %a) nounwind {
; RV32ID-NEXT: # %bb.1: # %start
; RV32ID-NEXT: mv a2, a1
; RV32ID-NEXT: .LBB10_2: # %start
-; RV32ID-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32ID-NEXT: lui a1, 389120
+; RV32ID-NEXT: addi a1, a1, -1
+; RV32ID-NEXT: fmv.w.x fa5, a1
; RV32ID-NEXT: flt.s a1, fa5, fs0
; RV32ID-NEXT: beqz a1, .LBB10_4
; RV32ID-NEXT: # %bb.3:
@@ -641,30 +652,59 @@ define i64 @fcvt_lu_bf16(bfloat %a) nounwind {
}
define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind {
-; CHECK32ZFBFMIN-LABEL: fcvt_lu_bf16_sat:
-; CHECK32ZFBFMIN: # %bb.0: # %start
-; CHECK32ZFBFMIN-NEXT: addi sp, sp, -16
-; CHECK32ZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32ZFBFMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; CHECK32ZFBFMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32ZFBFMIN-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK32ZFBFMIN-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
-; CHECK32ZFBFMIN-NEXT: fcvt.s.bf16 fa0, fa0
-; CHECK32ZFBFMIN-NEXT: fmv.w.x fa4, zero
-; CHECK32ZFBFMIN-NEXT: fle.s a0, fa4, fa0
-; CHECK32ZFBFMIN-NEXT: flt.s a1, fa5, fa0
-; CHECK32ZFBFMIN-NEXT: neg s0, a1
-; CHECK32ZFBFMIN-NEXT: neg s1, a0
-; CHECK32ZFBFMIN-NEXT: call __fixunssfdi
-; CHECK32ZFBFMIN-NEXT: and a0, s1, a0
-; CHECK32ZFBFMIN-NEXT: and a1, s1, a1
-; CHECK32ZFBFMIN-NEXT: or a0, s0, a0
-; CHECK32ZFBFMIN-NEXT: or a1, s0, a1
-; CHECK32ZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK32ZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; CHECK32ZFBFMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; CHECK32ZFBFMIN-NEXT: addi sp, sp, 16
-; CHECK32ZFBFMIN-NEXT: ret
+; RV32IZFBFMIN-LABEL: fcvt_lu_bf16_sat:
+; RV32IZFBFMIN: # %bb.0: # %start
+; RV32IZFBFMIN-NEXT: addi sp, sp, -16
+; RV32IZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IZFBFMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IZFBFMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV32IZFBFMIN-NEXT: fcvt.s.bf16 fs0, fa0
+; RV32IZFBFMIN-NEXT: fmv.w.x fa5, zero
+; RV32IZFBFMIN-NEXT: fle.s a0, fa5, fs0
+; RV32IZFBFMIN-NEXT: neg s0, a0
+; RV32IZFBFMIN-NEXT: fmv.s fa0, fs0
+; RV32IZFBFMIN-NEXT: call __fixunssfdi
+; RV32IZFBFMIN-NEXT: and a0, s0, a0
+; RV32IZFBFMIN-NEXT: lui a2, 391168
+; RV32IZFBFMIN-NEXT: and a1, s0, a1
+; RV32IZFBFMIN-NEXT: addi a2, a2, -1
+; RV32IZFBFMIN-NEXT: fmv.w.x fa5, a2
+; RV32IZFBFMIN-NEXT: flt.s a2, fa5, fs0
+; RV32IZFBFMIN-NEXT: neg a2, a2
+; RV32IZFBFMIN-NEXT: or a0, a2, a0
+; RV32IZFBFMIN-NEXT: or a1, a2, a1
+; RV32IZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IZFBFMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
+; RV32IZFBFMIN-NEXT: addi sp, sp, 16
+; RV32IZFBFMIN-NEXT: ret
+;
+; R32IDZFBFMIN-LABEL: fcvt_lu_bf16_sat:
+; R32IDZFBFMIN: # %bb.0: # %start
+; R32IDZFBFMIN-NEXT: addi sp, sp, -16
+; R32IDZFBFMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; R32IDZFBFMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; R32IDZFBFMIN-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
+; R32IDZFBFMIN-NEXT: fcvt.s.bf16 fs0, fa0
+; R32IDZFBFMIN-NEXT: fmv.w.x fa5, zero
+; R32IDZFBFMIN-NEXT: fle.s a0, fa5, fs0
+; R32IDZFBFMIN-NEXT: neg s0, a0
+; R32IDZFBFMIN-NEXT: fmv.s fa0, fs0
+; R32IDZFBFMIN-NEXT: call __fixunssfdi
+; R32IDZFBFMIN-NEXT: and a0, s0, a0
+; R32IDZFBFMIN-NEXT: lui a2, 391168
+; R32IDZFBFMIN-NEXT: and a1, s0, a1
+; R32IDZFBFMIN-NEXT: addi a2, a2, -1
+; R32IDZFBFMIN-NEXT: fmv.w.x fa5, a2
+; R32IDZFBFMIN-NEXT: flt.s a2, fa5, fs0
+; R32IDZFBFMIN-NEXT: neg a2, a2
+; R32IDZFBFMIN-NEXT: or a0, a2, a0
+; R32IDZFBFMIN-NEXT: or a1, a2, a1
+; R32IDZFBFMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; R32IDZFBFMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; R32IDZFBFMIN-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
+; R32IDZFBFMIN-NEXT: addi sp, sp, 16
+; R32IDZFBFMIN-NEXT: ret
;
; RV32ID-LABEL: fcvt_lu_bf16_sat:
; RV32ID: # %bb.0: # %start
@@ -673,15 +713,16 @@ define i64 @fcvt_lu_bf16_sat(bfloat %a) nounwind {
; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32ID-NEXT: fmv.x.w a0, fa0
-; RV32ID-NEXT: lui a1, %hi(.LCPI12_0)
-; RV32ID-NEXT: fmv.w.x fa5, zero
-; RV32ID-NEXT: flw fa4, %lo(.LCPI12_0)(a1)
+; RV32ID-NEXT: lui a1, 391168
; RV32ID-NEXT: slli a0, a0, 16
+; RV32ID-NEXT: addi a1, a1, -1
; RV32ID-NEXT: fmv.w.x fa0, a0
-; RV32ID-NEXT: fle.s a0, fa5, fa0
-; RV32ID-NEXT: flt.s a1, fa4, fa0
-; RV32ID-NEXT: neg s0, a1
-; RV32ID-NEXT: neg s1, a0
+; RV32ID-NEXT: fmv.w.x fa5, a1
+; RV32ID-NEXT: flt.s a0, fa5, fa0
+; RV32ID-NEXT: fmv.w.x fa5, zero
+; RV32ID-NEXT: fle.s a1, fa5, fa0
+; RV32ID-NEXT: neg s0, a0
+; RV32ID-NEXT: neg s1, a1
; RV32ID-NEXT: call __fixunssfdi
; RV32ID-NEXT: and a0, s1, a0
; RV32ID-NEXT: and a1, s1, a1
diff --git a/llvm/test/CodeGen/RISCV/bfloat-imm.ll b/llvm/test/CodeGen/RISCV/bfloat-imm.ll
index 76ff720b1c268..61014891414d8 100644
--- a/llvm/test/CodeGen/RISCV/bfloat-imm.ll
+++ b/llvm/test/CodeGen/RISCV/bfloat-imm.ll
@@ -7,8 +7,9 @@
define bfloat @bfloat_imm() nounwind {
; CHECK-LABEL: bfloat_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: lui a0, 4
+; CHECK-NEXT: addi a0, a0, 64
+; CHECK-NEXT: fmv.h.x fa0, a0
; CHECK-NEXT: ret
ret bfloat 3.0
}
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-half.ll b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
index d7957540d1b29..d8e6b7f3ede9a 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-half.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-half.ll
@@ -519,15 +519,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV32-ILP32F: # %bb.0:
; RV32-ILP32F-NEXT: addi sp, sp, -16
; RV32-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32F-NEXT: lui a4, %hi(.LCPI3_0)
+; RV32-ILP32F-NEXT: lui a7, 1048565
; RV32-ILP32F-NEXT: li a0, 1
; RV32-ILP32F-NEXT: li a1, 2
; RV32-ILP32F-NEXT: li a2, 3
; RV32-ILP32F-NEXT: li a3, 4
-; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI3_0)(a4)
; RV32-ILP32F-NEXT: li a4, 5
; RV32-ILP32F-NEXT: li a5, 6
; RV32-ILP32F-NEXT: li a6, 7
+; RV32-ILP32F-NEXT: addi a7, a7, -1792
+; RV32-ILP32F-NEXT: fmv.w.x fa0, a7
; RV32-ILP32F-NEXT: li a7, 8
; RV32-ILP32F-NEXT: call callee_half_on_stack
; RV32-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -538,15 +539,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV64-LP64F: # %bb.0:
; RV64-LP64F-NEXT: addi sp, sp, -16
; RV64-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64F-NEXT: lui a4, %hi(.LCPI3_0)
+; RV64-LP64F-NEXT: lui a7, 1048565
; RV64-LP64F-NEXT: li a0, 1
; RV64-LP64F-NEXT: li a1, 2
; RV64-LP64F-NEXT: li a2, 3
; RV64-LP64F-NEXT: li a3, 4
-; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI3_0)(a4)
; RV64-LP64F-NEXT: li a4, 5
; RV64-LP64F-NEXT: li a5, 6
; RV64-LP64F-NEXT: li a6, 7
+; RV64-LP64F-NEXT: addi a7, a7, -1792
+; RV64-LP64F-NEXT: fmv.w.x fa0, a7
; RV64-LP64F-NEXT: li a7, 8
; RV64-LP64F-NEXT: call callee_half_on_stack
; RV64-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -557,15 +559,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV32-ILP32ZFHMIN: # %bb.0:
; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32ZFHMIN-NEXT: lui a4, %hi(.LCPI3_0)
+; RV32-ILP32ZFHMIN-NEXT: lui a7, 5
; RV32-ILP32ZFHMIN-NEXT: li a0, 1
; RV32-ILP32ZFHMIN-NEXT: li a1, 2
; RV32-ILP32ZFHMIN-NEXT: li a2, 3
; RV32-ILP32ZFHMIN-NEXT: li a3, 4
-; RV32-ILP32ZFHMIN-NEXT: flh fa0, %lo(.LCPI3_0)(a4)
; RV32-ILP32ZFHMIN-NEXT: li a4, 5
; RV32-ILP32ZFHMIN-NEXT: li a5, 6
; RV32-ILP32ZFHMIN-NEXT: li a6, 7
+; RV32-ILP32ZFHMIN-NEXT: addi a7, a7, -1792
+; RV32-ILP32ZFHMIN-NEXT: fmv.h.x fa0, a7
; RV32-ILP32ZFHMIN-NEXT: li a7, 8
; RV32-ILP32ZFHMIN-NEXT: call callee_half_on_stack
; RV32-ILP32ZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -576,15 +579,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV64-LP64ZFHMIN: # %bb.0:
; RV64-LP64ZFHMIN-NEXT: addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64ZFHMIN-NEXT: lui a4, %hi(.LCPI3_0)
+; RV64-LP64ZFHMIN-NEXT: lui a7, 5
; RV64-LP64ZFHMIN-NEXT: li a0, 1
; RV64-LP64ZFHMIN-NEXT: li a1, 2
; RV64-LP64ZFHMIN-NEXT: li a2, 3
; RV64-LP64ZFHMIN-NEXT: li a3, 4
-; RV64-LP64ZFHMIN-NEXT: flh fa0, %lo(.LCPI3_0)(a4)
; RV64-LP64ZFHMIN-NEXT: li a4, 5
; RV64-LP64ZFHMIN-NEXT: li a5, 6
; RV64-LP64ZFHMIN-NEXT: li a6, 7
+; RV64-LP64ZFHMIN-NEXT: addi a7, a7, -1792
+; RV64-LP64ZFHMIN-NEXT: fmv.h.x fa0, a7
; RV64-LP64ZFHMIN-NEXT: li a7, 8
; RV64-LP64ZFHMIN-NEXT: call callee_half_on_stack
; RV64-LP64ZFHMIN-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -595,15 +599,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV32-ZFH-ILP32: # %bb.0:
; RV32-ZFH-ILP32-NEXT: addi sp, sp, -16
; RV32-ZFH-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ZFH-ILP32-NEXT: lui a4, %hi(.LCPI3_0)
+; RV32-ZFH-ILP32-NEXT: lui a7, 5
; RV32-ZFH-ILP32-NEXT: li a0, 1
; RV32-ZFH-ILP32-NEXT: li a1, 2
; RV32-ZFH-ILP32-NEXT: li a2, 3
; RV32-ZFH-ILP32-NEXT: li a3, 4
-; RV32-ZFH-ILP32-NEXT: flh fa5, %lo(.LCPI3_0)(a4)
; RV32-ZFH-ILP32-NEXT: li a4, 5
; RV32-ZFH-ILP32-NEXT: li a5, 6
; RV32-ZFH-ILP32-NEXT: li a6, 7
+; RV32-ZFH-ILP32-NEXT: addi a7, a7, -1792
+; RV32-ZFH-ILP32-NEXT: fmv.h.x fa5, a7
; RV32-ZFH-ILP32-NEXT: li a7, 8
; RV32-ZFH-ILP32-NEXT: fsh fa5, 0(sp)
; RV32-ZFH-ILP32-NEXT: call callee_half_on_stack
@@ -615,15 +620,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV32-ZFH-ILP32F: # %bb.0:
; RV32-ZFH-ILP32F-NEXT: addi sp, sp, -16
; RV32-ZFH-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ZFH-ILP32F-NEXT: lui a4, %hi(.LCPI3_0)
+; RV32-ZFH-ILP32F-NEXT: lui a7, 5
; RV32-ZFH-ILP32F-NEXT: li a0, 1
; RV32-ZFH-ILP32F-NEXT: li a1, 2
; RV32-ZFH-ILP32F-NEXT: li a2, 3
; RV32-ZFH-ILP32F-NEXT: li a3, 4
-; RV32-ZFH-ILP32F-NEXT: flh fa0, %lo(.LCPI3_0)(a4)
; RV32-ZFH-ILP32F-NEXT: li a4, 5
; RV32-ZFH-ILP32F-NEXT: li a5, 6
; RV32-ZFH-ILP32F-NEXT: li a6, 7
+; RV32-ZFH-ILP32F-NEXT: addi a7, a7, -1792
+; RV32-ZFH-ILP32F-NEXT: fmv.h.x fa0, a7
; RV32-ZFH-ILP32F-NEXT: li a7, 8
; RV32-ZFH-ILP32F-NEXT: call callee_half_on_stack
; RV32-ZFH-ILP32F-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -634,15 +640,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV64-ZFH-LP64: # %bb.0:
; RV64-ZFH-LP64-NEXT: addi sp, sp, -16
; RV64-ZFH-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ZFH-LP64-NEXT: lui a4, %hi(.LCPI3_0)
+; RV64-ZFH-LP64-NEXT: lui a7, 5
; RV64-ZFH-LP64-NEXT: li a0, 1
; RV64-ZFH-LP64-NEXT: li a1, 2
; RV64-ZFH-LP64-NEXT: li a2, 3
; RV64-ZFH-LP64-NEXT: li a3, 4
-; RV64-ZFH-LP64-NEXT: flh fa5, %lo(.LCPI3_0)(a4)
; RV64-ZFH-LP64-NEXT: li a4, 5
; RV64-ZFH-LP64-NEXT: li a5, 6
; RV64-ZFH-LP64-NEXT: li a6, 7
+; RV64-ZFH-LP64-NEXT: addi a7, a7, -1792
+; RV64-ZFH-LP64-NEXT: fmv.h.x fa5, a7
; RV64-ZFH-LP64-NEXT: li a7, 8
; RV64-ZFH-LP64-NEXT: fsh fa5, 0(sp)
; RV64-ZFH-LP64-NEXT: call callee_half_on_stack
@@ -654,15 +661,16 @@ define i32 @caller_half_on_stack() nounwind {
; RV64-ZFH-LP64F: # %bb.0:
; RV64-ZFH-LP64F-NEXT: addi sp, sp, -16
; RV64-ZFH-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ZFH-LP64F-NEXT: lui a4, %hi(.LCPI3_0)
+; RV64-ZFH-LP64F-NEXT: lui a7, 5
; RV64-ZFH-LP64F-NEXT: li a0, 1
; RV64-ZFH-LP64F-NEXT: li a1, 2
; RV64-ZFH-LP64F-NEXT: li a2, 3
; RV64-ZFH-LP64F-NEXT: li a3, 4
-; RV64-ZFH-LP64F-NEXT: flh fa0, %lo(.LCPI3_0)(a4)
; RV64-ZFH-LP64F-NEXT: li a4, 5
; RV64-ZFH-LP64F-NEXT: li a5, 6
; RV64-ZFH-LP64F-NEXT: li a6, 7
+; RV64-ZFH-LP64F-NEXT: addi a7, a7, -1792
+; RV64-ZFH-LP64F-NEXT: fmv.h.x fa0, a7
; RV64-ZFH-LP64F-NEXT: li a7, 8
; RV64-ZFH-LP64F-NEXT: call callee_half_on_stack
; RV64-ZFH-LP64F-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -1038,31 +1046,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ILP32ZFHMIN: # %bb.0:
; RV32-ILP32ZFHMIN-NEXT: addi sp, sp, -16
; RV32-ILP32ZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ILP32ZFHMIN-NEXT: lui a0, %hi(.LCPI5_0)
-; RV32-ILP32ZFHMIN-NEXT: lui a1, 260096
-; RV32-ILP32ZFHMIN-NEXT: lui a2, 262144
-; RV32-ILP32ZFHMIN-NEXT: lui a3, 263168
-; RV32-ILP32ZFHMIN-NEXT: lui a4, 264192
-; RV32-ILP32ZFHMIN-NEXT: lui a5, 264704
-; RV32-ILP32ZFHMIN-NEXT: lui a6, 265216
-; RV32-ILP32ZFHMIN-NEXT: lui a7, 265728
-; RV32-ILP32ZFHMIN-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
+; RV32-ILP32ZFHMIN-NEXT: lui a7, 5
+; RV32-ILP32ZFHMIN-NEXT: lui a0, 260096
+; RV32-ILP32ZFHMIN-NEXT: lui a1, 262144
+; RV32-ILP32ZFHMIN-NEXT: lui a2, 263168
+; RV32-ILP32ZFHMIN-NEXT: lui a3, 264192
+; RV32-ILP32ZFHMIN-NEXT: lui a4, 264704
+; RV32-ILP32ZFHMIN-NEXT: lui a5, 265216
+; RV32-ILP32ZFHMIN-NEXT: lui a6, 265728
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa0, a0
; RV32-ILP32ZFHMIN-NEXT: lui t0, 266240
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa0, a1
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa1, a1
; RV32-ILP32ZFHMIN-NEXT: li a0, 1
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa1, a2
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa2, a2
; RV32-ILP32ZFHMIN-NEXT: li a1, 2
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa2, a3
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa3, a3
; RV32-ILP32ZFHMIN-NEXT: li a2, 3
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa3, a4
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa4, a4
; RV32-ILP32ZFHMIN-NEXT: li a3, 4
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa4, a5
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa5, a6
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa6, a7
-; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa7, t0
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa5, a5
; RV32-ILP32ZFHMIN-NEXT: li a4, 5
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa6, a6
; RV32-ILP32ZFHMIN-NEXT: li a5, 6
+; RV32-ILP32ZFHMIN-NEXT: fmv.w.x fa7, t0
; RV32-ILP32ZFHMIN-NEXT: li a6, 7
+; RV32-ILP32ZFHMIN-NEXT: addi a7, a7, -1792
+; RV32-ILP32ZFHMIN-NEXT: fmv.h.x ft0, a7
; RV32-ILP32ZFHMIN-NEXT: li a7, 8
; RV32-ILP32ZFHMIN-NEXT: fsh ft0, 0(sp)
; RV32-ILP32ZFHMIN-NEXT: call callee_half_on_stack
@@ -1074,31 +1083,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind {
; RV64-LP64ZFHMIN: # %bb.0:
; RV64-LP64ZFHMIN-NEXT: addi sp, sp, -16
; RV64-LP64ZFHMIN-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-LP64ZFHMIN-NEXT: lui a0, %hi(.LCPI5_0)
-; RV64-LP64ZFHMIN-NEXT: lui a1, 260096
-; RV64-LP64ZFHMIN-NEXT: lui a2, 262144
-; RV64-LP64ZFHMIN-NEXT: lui a3, 263168
-; RV64-LP64ZFHMIN-NEXT: lui a4, 264192
-; RV64-LP64ZFHMIN-NEXT: lui a5, 264704
-; RV64-LP64ZFHMIN-NEXT: lui a6, 265216
-; RV64-LP64ZFHMIN-NEXT: lui a7, 265728
-; RV64-LP64ZFHMIN-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
+; RV64-LP64ZFHMIN-NEXT: lui a7, 5
+; RV64-LP64ZFHMIN-NEXT: lui a0, 260096
+; RV64-LP64ZFHMIN-NEXT: lui a1, 262144
+; RV64-LP64ZFHMIN-NEXT: lui a2, 263168
+; RV64-LP64ZFHMIN-NEXT: lui a3, 264192
+; RV64-LP64ZFHMIN-NEXT: lui a4, 264704
+; RV64-LP64ZFHMIN-NEXT: lui a5, 265216
+; RV64-LP64ZFHMIN-NEXT: lui a6, 265728
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa0, a0
; RV64-LP64ZFHMIN-NEXT: lui t0, 266240
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa0, a1
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa1, a1
; RV64-LP64ZFHMIN-NEXT: li a0, 1
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa1, a2
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa2, a2
; RV64-LP64ZFHMIN-NEXT: li a1, 2
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa2, a3
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa3, a3
; RV64-LP64ZFHMIN-NEXT: li a2, 3
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa3, a4
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa4, a4
; RV64-LP64ZFHMIN-NEXT: li a3, 4
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa4, a5
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa5, a6
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa6, a7
-; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa7, t0
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa5, a5
; RV64-LP64ZFHMIN-NEXT: li a4, 5
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa6, a6
; RV64-LP64ZFHMIN-NEXT: li a5, 6
+; RV64-LP64ZFHMIN-NEXT: fmv.w.x fa7, t0
; RV64-LP64ZFHMIN-NEXT: li a6, 7
+; RV64-LP64ZFHMIN-NEXT: addi a7, a7, -1792
+; RV64-LP64ZFHMIN-NEXT: fmv.h.x ft0, a7
; RV64-LP64ZFHMIN-NEXT: li a7, 8
; RV64-LP64ZFHMIN-NEXT: fsh ft0, 0(sp)
; RV64-LP64ZFHMIN-NEXT: call callee_half_on_stack
@@ -1110,31 +1120,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ZFH-ILP32: # %bb.0:
; RV32-ZFH-ILP32-NEXT: addi sp, sp, -48
; RV32-ZFH-ILP32-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
-; RV32-ZFH-ILP32-NEXT: lui a2, %hi(.LCPI5_0)
-; RV32-ZFH-ILP32-NEXT: lui a3, 266240
-; RV32-ZFH-ILP32-NEXT: li a4, 8
-; RV32-ZFH-ILP32-NEXT: lui a5, 265728
-; RV32-ZFH-ILP32-NEXT: li a6, 7
-; RV32-ZFH-ILP32-NEXT: lui a7, 265216
-; RV32-ZFH-ILP32-NEXT: li t0, 6
-; RV32-ZFH-ILP32-NEXT: lui t1, 264704
-; RV32-ZFH-ILP32-NEXT: li t2, 5
+; RV32-ZFH-ILP32-NEXT: lui a5, 266240
+; RV32-ZFH-ILP32-NEXT: li a6, 8
+; RV32-ZFH-ILP32-NEXT: lui a7, 265728
+; RV32-ZFH-ILP32-NEXT: li t0, 7
+; RV32-ZFH-ILP32-NEXT: lui t1, 265216
+; RV32-ZFH-ILP32-NEXT: li t2, 6
+; RV32-ZFH-ILP32-NEXT: lui t3, 264704
+; RV32-ZFH-ILP32-NEXT: li t4, 5
+; RV32-ZFH-ILP32-NEXT: lui t5, 5
; RV32-ZFH-ILP32-NEXT: li a0, 1
; RV32-ZFH-ILP32-NEXT: lui a1, 260096
-; RV32-ZFH-ILP32-NEXT: flh fa5, %lo(.LCPI5_0)(a2)
; RV32-ZFH-ILP32-NEXT: li a2, 2
-; RV32-ZFH-ILP32-NEXT: sw a6, 16(sp)
-; RV32-ZFH-ILP32-NEXT: sw a5, 20(sp)
-; RV32-ZFH-ILP32-NEXT: sw a4, 24(sp)
-; RV32-ZFH-ILP32-NEXT: sw a3, 28(sp)
; RV32-ZFH-ILP32-NEXT: lui a3, 262144
-; RV32-ZFH-ILP32-NEXT: sw t2, 0(sp)
-; RV32-ZFH-ILP32-NEXT: sw t1, 4(sp)
-; RV32-ZFH-ILP32-NEXT: sw t0, 8(sp)
-; RV32-ZFH-ILP32-NEXT: sw a7, 12(sp)
; RV32-ZFH-ILP32-NEXT: li a4, 3
+; RV32-ZFH-ILP32-NEXT: sw t0, 16(sp)
+; RV32-ZFH-ILP32-NEXT: sw a7, 20(sp)
+; RV32-ZFH-ILP32-NEXT: sw a6, 24(sp)
+; RV32-ZFH-ILP32-NEXT: sw a5, 28(sp)
; RV32-ZFH-ILP32-NEXT: lui a5, 263168
+; RV32-ZFH-ILP32-NEXT: sw t4, 0(sp)
+; RV32-ZFH-ILP32-NEXT: sw t3, 4(sp)
+; RV32-ZFH-ILP32-NEXT: sw t2, 8(sp)
+; RV32-ZFH-ILP32-NEXT: sw t1, 12(sp)
; RV32-ZFH-ILP32-NEXT: li a6, 4
+; RV32-ZFH-ILP32-NEXT: addi a7, t5, -1792
+; RV32-ZFH-ILP32-NEXT: fmv.h.x fa5, a7
; RV32-ZFH-ILP32-NEXT: lui a7, 264192
; RV32-ZFH-ILP32-NEXT: fsh fa5, 32(sp)
; RV32-ZFH-ILP32-NEXT: call callee_half_on_stack
@@ -1146,31 +1157,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind {
; RV32-ZFH-ILP32F: # %bb.0:
; RV32-ZFH-ILP32F-NEXT: addi sp, sp, -16
; RV32-ZFH-ILP32F-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-ZFH-ILP32F-NEXT: lui a0, %hi(.LCPI5_0)
-; RV32-ZFH-ILP32F-NEXT: lui a1, 260096
-; RV32-ZFH-ILP32F-NEXT: lui a2, 262144
-; RV32-ZFH-ILP32F-NEXT: lui a3, 263168
-; RV32-ZFH-ILP32F-NEXT: lui a4, 264192
-; RV32-ZFH-ILP32F-NEXT: lui a5, 264704
-; RV32-ZFH-ILP32F-NEXT: lui a6, 265216
-; RV32-ZFH-ILP32F-NEXT: lui a7, 265728
-; RV32-ZFH-ILP32F-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
+; RV32-ZFH-ILP32F-NEXT: lui a7, 5
+; RV32-ZFH-ILP32F-NEXT: lui a0, 260096
+; RV32-ZFH-ILP32F-NEXT: lui a1, 262144
+; RV32-ZFH-ILP32F-NEXT: lui a2, 263168
+; RV32-ZFH-ILP32F-NEXT: lui a3, 264192
+; RV32-ZFH-ILP32F-NEXT: lui a4, 264704
+; RV32-ZFH-ILP32F-NEXT: lui a5, 265216
+; RV32-ZFH-ILP32F-NEXT: lui a6, 265728
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa0, a0
; RV32-ZFH-ILP32F-NEXT: lui t0, 266240
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa0, a1
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa1, a1
; RV32-ZFH-ILP32F-NEXT: li a0, 1
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa1, a2
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa2, a2
; RV32-ZFH-ILP32F-NEXT: li a1, 2
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa2, a3
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa3, a3
; RV32-ZFH-ILP32F-NEXT: li a2, 3
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa3, a4
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa4, a4
; RV32-ZFH-ILP32F-NEXT: li a3, 4
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa4, a5
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa5, a6
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa6, a7
-; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa7, t0
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa5, a5
; RV32-ZFH-ILP32F-NEXT: li a4, 5
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa6, a6
; RV32-ZFH-ILP32F-NEXT: li a5, 6
+; RV32-ZFH-ILP32F-NEXT: fmv.w.x fa7, t0
; RV32-ZFH-ILP32F-NEXT: li a6, 7
+; RV32-ZFH-ILP32F-NEXT: addi a7, a7, -1792
+; RV32-ZFH-ILP32F-NEXT: fmv.h.x ft0, a7
; RV32-ZFH-ILP32F-NEXT: li a7, 8
; RV32-ZFH-ILP32F-NEXT: fsh ft0, 0(sp)
; RV32-ZFH-ILP32F-NEXT: call callee_half_on_stack
@@ -1182,31 +1194,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind {
; RV64-ZFH-LP64: # %bb.0:
; RV64-ZFH-LP64-NEXT: addi sp, sp, -80
; RV64-ZFH-LP64-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
-; RV64-ZFH-LP64-NEXT: lui a2, %hi(.LCPI5_0)
-; RV64-ZFH-LP64-NEXT: lui a3, 266240
-; RV64-ZFH-LP64-NEXT: li a4, 8
-; RV64-ZFH-LP64-NEXT: lui a5, 265728
-; RV64-ZFH-LP64-NEXT: li a6, 7
-; RV64-ZFH-LP64-NEXT: lui a7, 265216
-; RV64-ZFH-LP64-NEXT: li t0, 6
-; RV64-ZFH-LP64-NEXT: lui t1, 264704
-; RV64-ZFH-LP64-NEXT: li t2, 5
+; RV64-ZFH-LP64-NEXT: lui a5, 266240
+; RV64-ZFH-LP64-NEXT: li a6, 8
+; RV64-ZFH-LP64-NEXT: lui a7, 265728
+; RV64-ZFH-LP64-NEXT: li t0, 7
+; RV64-ZFH-LP64-NEXT: lui t1, 265216
+; RV64-ZFH-LP64-NEXT: li t2, 6
+; RV64-ZFH-LP64-NEXT: lui t3, 264704
+; RV64-ZFH-LP64-NEXT: li t4, 5
+; RV64-ZFH-LP64-NEXT: lui t5, 5
; RV64-ZFH-LP64-NEXT: li a0, 1
; RV64-ZFH-LP64-NEXT: lui a1, 260096
-; RV64-ZFH-LP64-NEXT: flh fa5, %lo(.LCPI5_0)(a2)
; RV64-ZFH-LP64-NEXT: li a2, 2
-; RV64-ZFH-LP64-NEXT: sd a6, 32(sp)
-; RV64-ZFH-LP64-NEXT: sw a5, 40(sp)
-; RV64-ZFH-LP64-NEXT: sd a4, 48(sp)
-; RV64-ZFH-LP64-NEXT: sw a3, 56(sp)
; RV64-ZFH-LP64-NEXT: lui a3, 262144
-; RV64-ZFH-LP64-NEXT: sd t2, 0(sp)
-; RV64-ZFH-LP64-NEXT: sw t1, 8(sp)
-; RV64-ZFH-LP64-NEXT: sd t0, 16(sp)
-; RV64-ZFH-LP64-NEXT: sw a7, 24(sp)
; RV64-ZFH-LP64-NEXT: li a4, 3
+; RV64-ZFH-LP64-NEXT: sd t0, 32(sp)
+; RV64-ZFH-LP64-NEXT: sw a7, 40(sp)
+; RV64-ZFH-LP64-NEXT: sd a6, 48(sp)
+; RV64-ZFH-LP64-NEXT: sw a5, 56(sp)
; RV64-ZFH-LP64-NEXT: lui a5, 263168
+; RV64-ZFH-LP64-NEXT: sd t4, 0(sp)
+; RV64-ZFH-LP64-NEXT: sw t3, 8(sp)
+; RV64-ZFH-LP64-NEXT: sd t2, 16(sp)
+; RV64-ZFH-LP64-NEXT: sw t1, 24(sp)
; RV64-ZFH-LP64-NEXT: li a6, 4
+; RV64-ZFH-LP64-NEXT: addi a7, t5, -1792
+; RV64-ZFH-LP64-NEXT: fmv.h.x fa5, a7
; RV64-ZFH-LP64-NEXT: lui a7, 264192
; RV64-ZFH-LP64-NEXT: fsh fa5, 64(sp)
; RV64-ZFH-LP64-NEXT: call callee_half_on_stack
@@ -1218,31 +1231,32 @@ define i32 @caller_half_on_stack_exhausted_gprs_fprs() nounwind {
; RV64-ZFH-LP64F: # %bb.0:
; RV64-ZFH-LP64F-NEXT: addi sp, sp, -16
; RV64-ZFH-LP64F-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-ZFH-LP64F-NEXT: lui a0, %hi(.LCPI5_0)
-; RV64-ZFH-LP64F-NEXT: lui a1, 260096
-; RV64-ZFH-LP64F-NEXT: lui a2, 262144
-; RV64-ZFH-LP64F-NEXT: lui a3, 263168
-; RV64-ZFH-LP64F-NEXT: lui a4, 264192
-; RV64-ZFH-LP64F-NEXT: lui a5, 264704
-; RV64-ZFH-LP64F-NEXT: lui a6, 265216
-; RV64-ZFH-LP64F-NEXT: lui a7, 265728
-; RV64-ZFH-LP64F-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
+; RV64-ZFH-LP64F-NEXT: lui a7, 5
+; RV64-ZFH-LP64F-NEXT: lui a0, 260096
+; RV64-ZFH-LP64F-NEXT: lui a1, 262144
+; RV64-ZFH-LP64F-NEXT: lui a2, 263168
+; RV64-ZFH-LP64F-NEXT: lui a3, 264192
+; RV64-ZFH-LP64F-NEXT: lui a4, 264704
+; RV64-ZFH-LP64F-NEXT: lui a5, 265216
+; RV64-ZFH-LP64F-NEXT: lui a6, 265728
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa0, a0
; RV64-ZFH-LP64F-NEXT: lui t0, 266240
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa0, a1
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa1, a1
; RV64-ZFH-LP64F-NEXT: li a0, 1
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa1, a2
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa2, a2
; RV64-ZFH-LP64F-NEXT: li a1, 2
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa2, a3
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa3, a3
; RV64-ZFH-LP64F-NEXT: li a2, 3
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa3, a4
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa4, a4
; RV64-ZFH-LP64F-NEXT: li a3, 4
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa4, a5
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa5, a6
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa6, a7
-; RV64-ZFH-LP64F-NEXT: fmv.w.x fa7, t0
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa5, a5
; RV64-ZFH-LP64F-NEXT: li a4, 5
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa6, a6
; RV64-ZFH-LP64F-NEXT: li a5, 6
+; RV64-ZFH-LP64F-NEXT: fmv.w.x fa7, t0
; RV64-ZFH-LP64F-NEXT: li a6, 7
+; RV64-ZFH-LP64F-NEXT: addi a7, a7, -1792
+; RV64-ZFH-LP64F-NEXT: fmv.h.x ft0, a7
; RV64-ZFH-LP64F-NEXT: li a7, 8
; RV64-ZFH-LP64F-NEXT: fsh ft0, 0(sp)
; RV64-ZFH-LP64F-NEXT: call callee_half_on_stack
@@ -1280,26 +1294,30 @@ define half @callee_half_ret() nounwind {
;
; RV32-ILP32F-LABEL: callee_half_ret:
; RV32-ILP32F: # %bb.0:
-; RV32-ILP32F-NEXT: lui a0, %hi(.LCPI6_0)
-; RV32-ILP32F-NEXT: flw fa0, %lo(.LCPI6_0)(a0)
+; RV32-ILP32F-NEXT: lui a0, 1048564
+; RV32-ILP32F-NEXT: addi a0, a0, -1024
+; RV32-ILP32F-NEXT: fmv.w.x fa0, a0
; RV32-ILP32F-NEXT: ret
;
; RV64-LP64F-LABEL: callee_half_ret:
; RV64-LP64F: # %bb.0:
-; RV64-LP64F-NEXT: lui a0, %hi(.LCPI6_0)
-; RV64-LP64F-NEXT: flw fa0, %lo(.LCPI6_0)(a0)
+; RV64-LP64F-NEXT: lui a0, 1048564
+; RV64-LP64F-NEXT: addi a0, a0, -1024
+; RV64-LP64F-NEXT: fmv.w.x fa0, a0
; RV64-LP64F-NEXT: ret
;
; RV32-ILP32ZFHMIN-LABEL: callee_half_ret:
; RV32-ILP32ZFHMIN: # %bb.0:
-; RV32-ILP32ZFHMIN-NEXT: lui a0, %hi(.LCPI6_0)
-; RV32-ILP32ZFHMIN-NEXT: flh fa0, %lo(.LCPI6_0)(a0)
+; RV32-ILP32ZFHMIN-NEXT: li a0, 15
+; RV32-ILP32ZFHMIN-NEXT: slli a0, a0, 10
+; RV32-ILP32ZFHMIN-NEXT: fmv.h.x fa0, a0
; RV32-ILP32ZFHMIN-NEXT: ret
;
; RV64-LP64ZFHMIN-LABEL: callee_half_ret:
; RV64-LP64ZFHMIN: # %bb.0:
-; RV64-LP64ZFHMIN-NEXT: lui a0, %hi(.LCPI6_0)
-; RV64-LP64ZFHMIN-NEXT: flh fa0, %lo(.LCPI6_0)(a0)
+; RV64-LP64ZFHMIN-NEXT: li a0, 15
+; RV64-LP64ZFHMIN-NEXT: slli a0, a0, 10
+; RV64-LP64ZFHMIN-NEXT: fmv.h.x fa0, a0
; RV64-LP64ZFHMIN-NEXT: ret
;
; RV32-ZFH-ILP32-LABEL: callee_half_ret:
@@ -1310,8 +1328,9 @@ define half @callee_half_ret() nounwind {
;
; RV32-ZFH-ILP32F-LABEL: callee_half_ret:
; RV32-ZFH-ILP32F: # %bb.0:
-; RV32-ZFH-ILP32F-NEXT: lui a0, %hi(.LCPI6_0)
-; RV32-ZFH-ILP32F-NEXT: flh fa0, %lo(.LCPI6_0)(a0)
+; RV32-ZFH-ILP32F-NEXT: li a0, 15
+; RV32-ZFH-ILP32F-NEXT: slli a0, a0, 10
+; RV32-ZFH-ILP32F-NEXT: fmv.h.x fa0, a0
; RV32-ZFH-ILP32F-NEXT: ret
;
; RV64-ZFH-LP64-LABEL: callee_half_ret:
@@ -1322,8 +1341,9 @@ define half @callee_half_ret() nounwind {
;
; RV64-ZFH-LP64F-LABEL: callee_half_ret:
; RV64-ZFH-LP64F: # %bb.0:
-; RV64-ZFH-LP64F-NEXT: lui a0, %hi(.LCPI6_0)
-; RV64-ZFH-LP64F-NEXT: flh fa0, %lo(.LCPI6_0)(a0)
+; RV64-ZFH-LP64F-NEXT: li a0, 15
+; RV64-ZFH-LP64F-NEXT: slli a0, a0, 10
+; RV64-ZFH-LP64F-NEXT: fmv.h.x fa0, a0
; RV64-ZFH-LP64F-NEXT: ret
ret half 1.0
}
diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
index 94f8d7cab9b95..220494a4c4ff8 100644
--- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
@@ -287,8 +287,9 @@ indirectgoto:
define float @lower_constantpool(float %a) nounwind {
; RV32F-SMALL-LABEL: lower_constantpool:
; RV32F-SMALL: # %bb.0:
-; RV32F-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32F-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
+; RV32F-SMALL-NEXT: lui a0, 260097
+; RV32F-SMALL-NEXT: addi a0, a0, -2048
+; RV32F-SMALL-NEXT: fmv.w.x fa5, a0
; RV32F-SMALL-NEXT: fadd.s fa0, fa0, fa5
; RV32F-SMALL-NEXT: ret
;
@@ -301,32 +302,33 @@ define float @lower_constantpool(float %a) nounwind {
;
; RV32F-MEDIUM-LABEL: lower_constantpool:
; RV32F-MEDIUM: # %bb.0:
-; RV32F-MEDIUM-NEXT: .Lpcrel_hi3:
-; RV32F-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; RV32F-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV32F-MEDIUM-NEXT: lui a0, 260097
+; RV32F-MEDIUM-NEXT: addi a0, a0, -2048
+; RV32F-MEDIUM-NEXT: fmv.w.x fa5, a0
; RV32F-MEDIUM-NEXT: fadd.s fa0, fa0, fa5
; RV32F-MEDIUM-NEXT: ret
;
; RV64F-SMALL-LABEL: lower_constantpool:
; RV64F-SMALL: # %bb.0:
-; RV64F-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64F-SMALL-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
+; RV64F-SMALL-NEXT: lui a0, 260097
+; RV64F-SMALL-NEXT: addi a0, a0, -2048
+; RV64F-SMALL-NEXT: fmv.w.x fa5, a0
; RV64F-SMALL-NEXT: fadd.s fa0, fa0, fa5
; RV64F-SMALL-NEXT: ret
;
; RV64F-MEDIUM-LABEL: lower_constantpool:
; RV64F-MEDIUM: # %bb.0:
-; RV64F-MEDIUM-NEXT: .Lpcrel_hi3:
-; RV64F-MEDIUM-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; RV64F-MEDIUM-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64F-MEDIUM-NEXT: lui a0, 260097
+; RV64F-MEDIUM-NEXT: addi a0, a0, -2048
+; RV64F-MEDIUM-NEXT: fmv.w.x fa5, a0
; RV64F-MEDIUM-NEXT: fadd.s fa0, fa0, fa5
; RV64F-MEDIUM-NEXT: ret
;
; RV64F-LARGE-LABEL: lower_constantpool:
; RV64F-LARGE: # %bb.0:
-; RV64F-LARGE-NEXT: .Lpcrel_hi3:
-; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI3_0)
-; RV64F-LARGE-NEXT: flw fa5, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64F-LARGE-NEXT: lui a0, 260097
+; RV64F-LARGE-NEXT: addi a0, a0, -2048
+; RV64F-LARGE-NEXT: fmv.w.x fa5, a0
; RV64F-LARGE-NEXT: fadd.s fa0, fa0, fa5
; RV64F-LARGE-NEXT: ret
;
@@ -390,13 +392,13 @@ define i32 @lower_extern_weak(i32 %a) nounwind {
; RV32IXQCILI-SMALL-NEXT: lw a0, 0(a0)
; RV32IXQCILI-SMALL-NEXT: ret
;
-; RV32F-MEDIUM-LABEL: lower_extern_weak:
-; RV32F-MEDIUM: # %bb.0:
-; RV32F-MEDIUM-NEXT: .Lpcrel_hi4:
-; RV32F-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
-; RV32F-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV32F-MEDIUM-NEXT: lw a0, 0(a0)
-; RV32F-MEDIUM-NEXT: ret
+; RV32I-MEDIUM-LABEL: lower_extern_weak:
+; RV32I-MEDIUM: # %bb.0:
+; RV32I-MEDIUM-NEXT: .Lpcrel_hi3:
+; RV32I-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
+; RV32I-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV32I-MEDIUM-NEXT: lw a0, 0(a0)
+; RV32I-MEDIUM-NEXT: ret
;
; RV64I-SMALL-LABEL: lower_extern_weak:
; RV64I-SMALL: # %bb.0:
@@ -404,45 +406,21 @@ define i32 @lower_extern_weak(i32 %a) nounwind {
; RV64I-SMALL-NEXT: lw a0, %lo(W)(a0)
; RV64I-SMALL-NEXT: ret
;
-; RV64F-MEDIUM-LABEL: lower_extern_weak:
-; RV64F-MEDIUM: # %bb.0:
-; RV64F-MEDIUM-NEXT: .Lpcrel_hi4:
-; RV64F-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
-; RV64F-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV64F-MEDIUM-NEXT: lw a0, 0(a0)
-; RV64F-MEDIUM-NEXT: ret
-;
-; RV64F-LARGE-LABEL: lower_extern_weak:
-; RV64F-LARGE: # %bb.0:
-; RV64F-LARGE-NEXT: .Lpcrel_hi4:
-; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0)
-; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
-; RV64F-LARGE-NEXT: lw a0, 0(a0)
-; RV64F-LARGE-NEXT: ret
-;
-; RV32FINX-MEDIUM-LABEL: lower_extern_weak:
-; RV32FINX-MEDIUM: # %bb.0:
-; RV32FINX-MEDIUM-NEXT: .Lpcrel_hi3:
-; RV32FINX-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
-; RV32FINX-MEDIUM-NEXT: lw a0, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV32FINX-MEDIUM-NEXT: lw a0, 0(a0)
-; RV32FINX-MEDIUM-NEXT: ret
-;
-; RV64FINX-MEDIUM-LABEL: lower_extern_weak:
-; RV64FINX-MEDIUM: # %bb.0:
-; RV64FINX-MEDIUM-NEXT: .Lpcrel_hi3:
-; RV64FINX-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
-; RV64FINX-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV64FINX-MEDIUM-NEXT: lw a0, 0(a0)
-; RV64FINX-MEDIUM-NEXT: ret
+; RV64I-MEDIUM-LABEL: lower_extern_weak:
+; RV64I-MEDIUM: # %bb.0:
+; RV64I-MEDIUM-NEXT: .Lpcrel_hi3:
+; RV64I-MEDIUM-NEXT: auipc a0, %got_pcrel_hi(W)
+; RV64I-MEDIUM-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64I-MEDIUM-NEXT: lw a0, 0(a0)
+; RV64I-MEDIUM-NEXT: ret
;
-; RV64FINX-LARGE-LABEL: lower_extern_weak:
-; RV64FINX-LARGE: # %bb.0:
-; RV64FINX-LARGE-NEXT: .Lpcrel_hi3:
-; RV64FINX-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0)
-; RV64FINX-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
-; RV64FINX-LARGE-NEXT: lw a0, 0(a0)
-; RV64FINX-LARGE-NEXT: ret
+; RV64I-LARGE-LABEL: lower_extern_weak:
+; RV64I-LARGE: # %bb.0:
+; RV64I-LARGE-NEXT: .Lpcrel_hi3:
+; RV64I-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI4_0)
+; RV64I-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi3)(a0)
+; RV64I-LARGE-NEXT: lw a0, 0(a0)
+; RV64I-LARGE-NEXT: ret
%1 = load volatile i32, ptr @W
ret i32 %1
}
@@ -466,9 +444,9 @@ define half @lower_global_half(half %a) nounwind {
;
; RV32F-MEDIUM-LABEL: lower_global_half:
; RV32F-MEDIUM: # %bb.0:
-; RV32F-MEDIUM-NEXT: .Lpcrel_hi5:
+; RV32F-MEDIUM-NEXT: .Lpcrel_hi4:
; RV32F-MEDIUM-NEXT: auipc a0, %pcrel_hi(X)
-; RV32F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi5)(a0)
+; RV32F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi4)(a0)
; RV32F-MEDIUM-NEXT: fadd.h fa0, fa0, fa5
; RV32F-MEDIUM-NEXT: ret
;
@@ -481,17 +459,17 @@ define half @lower_global_half(half %a) nounwind {
;
; RV64F-MEDIUM-LABEL: lower_global_half:
; RV64F-MEDIUM: # %bb.0:
-; RV64F-MEDIUM-NEXT: .Lpcrel_hi5:
+; RV64F-MEDIUM-NEXT: .Lpcrel_hi4:
; RV64F-MEDIUM-NEXT: auipc a0, %pcrel_hi(X)
-; RV64F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi5)(a0)
+; RV64F-MEDIUM-NEXT: flh fa5, %pcrel_lo(.Lpcrel_hi4)(a0)
; RV64F-MEDIUM-NEXT: fadd.h fa0, fa0, fa5
; RV64F-MEDIUM-NEXT: ret
;
; RV64F-LARGE-LABEL: lower_global_half:
; RV64F-LARGE: # %bb.0:
-; RV64F-LARGE-NEXT: .Lpcrel_hi5:
+; RV64F-LARGE-NEXT: .Lpcrel_hi4:
; RV64F-LARGE-NEXT: auipc a0, %pcrel_hi(.LCPI5_0)
-; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi5)(a0)
+; RV64F-LARGE-NEXT: ld a0, %pcrel_lo(.Lpcrel_hi4)(a0)
; RV64F-LARGE-NEXT: flh fa5, 0(a0)
; RV64F-LARGE-NEXT: fadd.h fa0, fa0, fa5
; RV64F-LARGE-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index 8124d00e63fa7..c3e729800616d 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -1636,14 +1636,15 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
;
; RV64IFD-LABEL: fcvt_w_s_sat_i16:
; RV64IFD: # %bb.0: # %start
-; RV64IFD-NEXT: lui a0, %hi(.LCPI26_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI26_0)(a0)
-; RV64IFD-NEXT: lui a0, %hi(.LCPI26_1)
-; RV64IFD-NEXT: fld fa4, %lo(.LCPI26_1)(a0)
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: fmax.d fa5, fa0, fa5
+; RV64IFD-NEXT: lui a1, %hi(.LCPI26_0)
+; RV64IFD-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV64IFD-NEXT: li a1, -505
+; RV64IFD-NEXT: slli a1, a1, 53
+; RV64IFD-NEXT: fmv.d.x fa4, a1
+; RV64IFD-NEXT: fmax.d fa4, fa0, fa4
; RV64IFD-NEXT: neg a0, a0
-; RV64IFD-NEXT: fmin.d fa5, fa5, fa4
+; RV64IFD-NEXT: fmin.d fa5, fa4, fa5
; RV64IFD-NEXT: fcvt.l.d a1, fa5, rtz
; RV64IFD-NEXT: and a0, a0, a1
; RV64IFD-NEXT: ret
@@ -1668,16 +1669,17 @@ define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
;
; RV64IZFINXZDINX-LABEL: fcvt_w_s_sat_i16:
; RV64IZFINXZDINX: # %bb.0: # %start
-; RV64IZFINXZDINX-NEXT: li a1, -505
-; RV64IZFINXZDINX-NEXT: lui a2, %hi(.LCPI26_0)
-; RV64IZFINXZDINX-NEXT: slli a1, a1, 53
-; RV64IZFINXZDINX-NEXT: ld a2, %lo(.LCPI26_0)(a2)
-; RV64IZFINXZDINX-NEXT: fmax.d a1, a0, a1
-; RV64IZFINXZDINX-NEXT: feq.d a0, a0, a0
-; RV64IZFINXZDINX-NEXT: neg a0, a0
-; RV64IZFINXZDINX-NEXT: fmin.d a1, a1, a2
-; RV64IZFINXZDINX-NEXT: fcvt.l.d a1, a1, rtz
-; RV64IZFINXZDINX-NEXT: and a0, a0, a1
+; RV64IZFINXZDINX-NEXT: feq.d a1, a0, a0
+; RV64IZFINXZDINX-NEXT: li a2, -505
+; RV64IZFINXZDINX-NEXT: slli a2, a2, 53
+; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, a2
+; RV64IZFINXZDINX-NEXT: lui a2, 4152
+; RV64IZFINXZDINX-NEXT: neg a1, a1
+; RV64IZFINXZDINX-NEXT: addi a2, a2, -1
+; RV64IZFINXZDINX-NEXT: slli a2, a2, 38
+; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a2
+; RV64IZFINXZDINX-NEXT: fcvt.l.d a0, a0, rtz
+; RV64IZFINXZDINX-NEXT: and a0, a1, a0
; RV64IZFINXZDINX-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
@@ -1859,9 +1861,10 @@ define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
;
; RV64IZFINXZDINX-LABEL: fcvt_wu_s_sat_i16:
; RV64IZFINXZDINX: # %bb.0: # %start
-; RV64IZFINXZDINX-NEXT: lui a1, %hi(.LCPI28_0)
-; RV64IZFINXZDINX-NEXT: ld a1, %lo(.LCPI28_0)(a1)
; RV64IZFINXZDINX-NEXT: fmax.d a0, a0, zero
+; RV64IZFINXZDINX-NEXT: lui a1, 8312
+; RV64IZFINXZDINX-NEXT: addi a1, a1, -1
+; RV64IZFINXZDINX-NEXT: slli a1, a1, 37
; RV64IZFINXZDINX-NEXT: fmin.d a0, a0, a1
; RV64IZFINXZDINX-NEXT: fcvt.lu.d a0, a0, rtz
; RV64IZFINXZDINX-NEXT: ret
@@ -2012,13 +2015,15 @@ define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
;
; RV64IFD-LABEL: fcvt_w_s_sat_i8:
; RV64IFD: # %bb.0: # %start
-; RV64IFD-NEXT: lui a0, %hi(.LCPI30_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI30_0)(a0)
-; RV64IFD-NEXT: lui a0, %hi(.LCPI30_1)
-; RV64IFD-NEXT: fld fa4, %lo(.LCPI30_1)(a0)
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: fmax.d fa5, fa0, fa5
+; RV64IFD-NEXT: li a1, -509
+; RV64IFD-NEXT: slli a1, a1, 53
+; RV64IFD-NEXT: fmv.d.x fa5, a1
+; RV64IFD-NEXT: lui a1, 65919
; RV64IFD-NEXT: neg a0, a0
+; RV64IFD-NEXT: slli a1, a1, 34
+; RV64IFD-NEXT: fmax.d fa5, fa0, fa5
+; RV64IFD-NEXT: fmv.d.x fa4, a1
; RV64IFD-NEXT: fmin.d fa5, fa5, fa4
; RV64IFD-NEXT: fcvt.l.d a1, fa5, rtz
; RV64IFD-NEXT: and a0, a0, a1
@@ -2214,11 +2219,12 @@ define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind {
;
; RV64IFD-LABEL: fcvt_wu_s_sat_i8:
; RV64IFD: # %bb.0: # %start
-; RV64IFD-NEXT: lui a0, %hi(.LCPI32_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI32_0)(a0)
-; RV64IFD-NEXT: fmv.d.x fa4, zero
-; RV64IFD-NEXT: fmax.d fa4, fa0, fa4
-; RV64IFD-NEXT: fmin.d fa5, fa4, fa5
+; RV64IFD-NEXT: fmv.d.x fa5, zero
+; RV64IFD-NEXT: lui a0, 131967
+; RV64IFD-NEXT: fmax.d fa5, fa0, fa5
+; RV64IFD-NEXT: slli a0, a0, 33
+; RV64IFD-NEXT: fmv.d.x fa4, a0
+; RV64IFD-NEXT: fmin.d fa5, fa5, fa4
; RV64IFD-NEXT: fcvt.lu.d a0, fa5, rtz
; RV64IFD-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index 1119fd6d74a25..6f7c30edba3ea 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -47,8 +47,9 @@ define double @double_imm_op(double %a) nounwind {
;
; CHECK64D-LABEL: double_imm_op:
; CHECK64D: # %bb.0:
-; CHECK64D-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK64D-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
+; CHECK64D-NEXT: li a0, 1023
+; CHECK64D-NEXT: slli a0, a0, 52
+; CHECK64D-NEXT: fmv.d.x fa5, a0
; CHECK64D-NEXT: fadd.d fa0, fa0, fa5
; CHECK64D-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index bb57665fa1801..caeb6e6ce70af 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -866,8 +866,9 @@ define double @floor_f64(double %a) nounwind {
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI18_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB18_2
@@ -931,8 +932,9 @@ define double @ceil_f64(double %a) nounwind {
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI19_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB19_2
@@ -996,8 +998,9 @@ define double @trunc_f64(double %a) nounwind {
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI20_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB20_2
@@ -1061,8 +1064,9 @@ define double @rint_f64(double %a) nounwind {
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI21_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI21_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB21_2
@@ -1167,8 +1171,9 @@ define double @round_f64(double %a) nounwind {
;
; RV64IFD-LABEL: round_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI23_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI23_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB23_2
@@ -1232,8 +1237,9 @@ define double @roundeven_f64(double %a) nounwind {
;
; RV64IFD-LABEL: roundeven_f64:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI24_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB24_2
diff --git a/llvm/test/CodeGen/RISCV/double-round-conv.ll b/llvm/test/CodeGen/RISCV/double-round-conv.ll
index 3edbda3a4bf6b..6dd24c056e386 100644
--- a/llvm/test/CodeGen/RISCV/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-round-conv.ll
@@ -1145,8 +1145,9 @@ define double @test_floor_double(double %x) {
;
; RV64IFD-LABEL: test_floor_double:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI40_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB40_2
@@ -1194,8 +1195,9 @@ define double @test_ceil_double(double %x) {
;
; RV64IFD-LABEL: test_ceil_double:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI41_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI41_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB41_2
@@ -1243,8 +1245,9 @@ define double @test_trunc_double(double %x) {
;
; RV64IFD-LABEL: test_trunc_double:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI42_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB42_2
@@ -1292,8 +1295,9 @@ define double @test_round_double(double %x) {
;
; RV64IFD-LABEL: test_round_double:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI43_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI43_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB43_2
@@ -1341,8 +1345,9 @@ define double @test_roundeven_double(double %x) {
;
; RV64IFD-LABEL: test_roundeven_double:
; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a0, %hi(.LCPI44_0)
-; RV64IFD-NEXT: fld fa5, %lo(.LCPI44_0)(a0)
+; RV64IFD-NEXT: li a0, 1075
+; RV64IFD-NEXT: slli a0, a0, 52
+; RV64IFD-NEXT: fmv.d.x fa5, a0
; RV64IFD-NEXT: fabs.d fa4, fa0
; RV64IFD-NEXT: flt.d a0, fa4, fa5
; RV64IFD-NEXT: beqz a0, .LBB44_2
diff --git a/llvm/test/CodeGen/RISCV/double-zfa.ll b/llvm/test/CodeGen/RISCV/double-zfa.ll
index 2f35496b9b32c..f17c63ddb6cae 100644
--- a/llvm/test/CodeGen/RISCV/double-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/double-zfa.ll
@@ -69,21 +69,35 @@ define double @loadfpimm8() {
}
define double @loadfpimm9() {
-; CHECK-LABEL: loadfpimm9:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI8_0)(a0)
-; CHECK-NEXT: ret
+; RV32IDZFA-LABEL: loadfpimm9:
+; RV32IDZFA: # %bb.0:
+; RV32IDZFA-NEXT: lui a0, %hi(.LCPI8_0)
+; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI8_0)(a0)
+; RV32IDZFA-NEXT: ret
+;
+; RV64DZFA-LABEL: loadfpimm9:
+; RV64DZFA: # %bb.0:
+; RV64DZFA-NEXT: lui a0, 131967
+; RV64DZFA-NEXT: slli a0, a0, 33
+; RV64DZFA-NEXT: fmv.d.x fa0, a0
+; RV64DZFA-NEXT: ret
ret double 255.0
}
; Negative test. This is 1 * 2^256.
define double @loadfpimm10() {
-; CHECK-LABEL: loadfpimm10:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI9_0)(a0)
-; CHECK-NEXT: ret
+; RV32IDZFA-LABEL: loadfpimm10:
+; RV32IDZFA: # %bb.0:
+; RV32IDZFA-NEXT: lui a0, %hi(.LCPI9_0)
+; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI9_0)(a0)
+; RV32IDZFA-NEXT: ret
+;
+; RV64DZFA-LABEL: loadfpimm10:
+; RV64DZFA: # %bb.0:
+; RV64DZFA-NEXT: li a0, 1
+; RV64DZFA-NEXT: slli a0, a0, 60
+; RV64DZFA-NEXT: fmv.d.x fa0, a0
+; RV64DZFA-NEXT: ret
ret double 0x1000000000000000
}
@@ -125,11 +139,18 @@ define double @loadfpimm13() {
; Negative test. This is 2^-1023, a denormal.
define double @loadfpimm15() {
-; CHECK-LABEL: loadfpimm15:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: ret
+; RV32IDZFA-LABEL: loadfpimm15:
+; RV32IDZFA: # %bb.0:
+; RV32IDZFA-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32IDZFA-NEXT: fld fa0, %lo(.LCPI13_0)(a0)
+; RV32IDZFA-NEXT: ret
+;
+; RV64DZFA-LABEL: loadfpimm15:
+; RV64DZFA: # %bb.0:
+; RV64DZFA-NEXT: li a0, 1
+; RV64DZFA-NEXT: slli a0, a0, 51
+; RV64DZFA-NEXT: fmv.d.x fa0, a0
+; RV64DZFA-NEXT: ret
ret double 0x0008000000000000
}
diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll
index 72578193ee4bf..e6e4f6642f685 100644
--- a/llvm/test/CodeGen/RISCV/float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert.ll
@@ -623,20 +623,21 @@ define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-NEXT: # %bb.1: # %start
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB12_2: # %start
-; RV32IF-NEXT: lui a1, %hi(.LCPI12_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI12_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB12_4
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: addi a2, a3, -1
; RV32IF-NEXT: .LBB12_4: # %start
; RV32IF-NEXT: feq.s a3, fs0, fs0
-; RV32IF-NEXT: neg a4, a1
-; RV32IF-NEXT: neg a1, s0
+; RV32IF-NEXT: neg a4, s0
+; RV32IF-NEXT: neg a5, a1
; RV32IF-NEXT: neg a3, a3
-; RV32IF-NEXT: and a0, a1, a0
+; RV32IF-NEXT: and a0, a4, a0
; RV32IF-NEXT: and a1, a3, a2
-; RV32IF-NEXT: or a0, a4, a0
+; RV32IF-NEXT: or a0, a5, a0
; RV32IF-NEXT: and a0, a3, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -864,10 +865,11 @@ define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IF-NEXT: fle.s a0, fa5, fa0
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI14_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI14_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
@@ -1405,13 +1407,14 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat_i16:
; RV32IF: # %bb.0: # %start
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: lui a1, %hi(.LCPI24_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI24_0)(a1)
; RV32IF-NEXT: lui a1, 815104
-; RV32IF-NEXT: fmv.w.x fa4, a1
-; RV32IF-NEXT: fmax.s fa4, fa0, fa4
+; RV32IF-NEXT: fmv.w.x fa5, a1
+; RV32IF-NEXT: lui a1, 290816
; RV32IF-NEXT: neg a0, a0
-; RV32IF-NEXT: fmin.s fa5, fa4, fa5
+; RV32IF-NEXT: addi a1, a1, -512
+; RV32IF-NEXT: fmax.s fa5, fa0, fa5
+; RV32IF-NEXT: fmv.w.x fa4, a1
+; RV32IF-NEXT: fmin.s fa5, fa5, fa4
; RV32IF-NEXT: fcvt.w.s a1, fa5, rtz
; RV32IF-NEXT: and a0, a0, a1
; RV32IF-NEXT: ret
@@ -1419,13 +1422,14 @@ define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV64IF-LABEL: fcvt_w_s_sat_i16:
; RV64IF: # %bb.0: # %start
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: lui a1, %hi(.LCPI24_0)
-; RV64IF-NEXT: flw fa5, %lo(.LCPI24_0)(a1)
; RV64IF-NEXT: lui a1, 815104
-; RV64IF-NEXT: fmv.w.x fa4, a1
-; RV64IF-NEXT: fmax.s fa4, fa0, fa4
+; RV64IF-NEXT: fmv.w.x fa5, a1
+; RV64IF-NEXT: lui a1, 290816
; RV64IF-NEXT: neg a0, a0
-; RV64IF-NEXT: fmin.s fa5, fa4, fa5
+; RV64IF-NEXT: addi a1, a1, -512
+; RV64IF-NEXT: fmax.s fa5, fa0, fa5
+; RV64IF-NEXT: fmv.w.x fa4, a1
+; RV64IF-NEXT: fmin.s fa5, fa5, fa4
; RV64IF-NEXT: fcvt.l.s a1, fa5, rtz
; RV64IF-NEXT: and a0, a0, a1
; RV64IF-NEXT: ret
@@ -1590,21 +1594,23 @@ define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat_i16:
; RV32IF: # %bb.0: # %start
-; RV32IF-NEXT: lui a0, %hi(.LCPI26_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI26_0)(a0)
-; RV32IF-NEXT: fmv.w.x fa4, zero
-; RV32IF-NEXT: fmax.s fa4, fa0, fa4
-; RV32IF-NEXT: fmin.s fa5, fa4, fa5
+; RV32IF-NEXT: fmv.w.x fa5, zero
+; RV32IF-NEXT: lui a0, 292864
+; RV32IF-NEXT: fmax.s fa5, fa0, fa5
+; RV32IF-NEXT: addi a0, a0, -256
+; RV32IF-NEXT: fmv.w.x fa4, a0
+; RV32IF-NEXT: fmin.s fa5, fa5, fa4
; RV32IF-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcvt_wu_s_sat_i16:
; RV64IF: # %bb.0: # %start
-; RV64IF-NEXT: lui a0, %hi(.LCPI26_0)
-; RV64IF-NEXT: flw fa5, %lo(.LCPI26_0)(a0)
-; RV64IF-NEXT: fmv.w.x fa4, zero
-; RV64IF-NEXT: fmax.s fa4, fa0, fa4
-; RV64IF-NEXT: fmin.s fa5, fa4, fa5
+; RV64IF-NEXT: fmv.w.x fa5, zero
+; RV64IF-NEXT: lui a0, 292864
+; RV64IF-NEXT: fmax.s fa5, fa0, fa5
+; RV64IF-NEXT: addi a0, a0, -256
+; RV64IF-NEXT: fmv.w.x fa4, a0
+; RV64IF-NEXT: fmin.s fa5, fa5, fa4
; RV64IF-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IF-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll
index a010ab49b2827..e4e34543d6314 100644
--- a/llvm/test/CodeGen/RISCV/float-imm.ll
+++ b/llvm/test/CodeGen/RISCV/float-imm.ll
@@ -12,8 +12,9 @@
define float @float_imm() nounwind {
; CHECK-LABEL: float_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flw fa0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: lui a0, 263313
+; CHECK-NEXT: addi a0, a0, -37
+; CHECK-NEXT: fmv.w.x fa0, a0
; CHECK-NEXT: ret
;
; CHECKZFINX-LABEL: float_imm:
diff --git a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
index 809cc31abe612..6871f29cb8b05 100644
--- a/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/float-round-conv-sat.ll
@@ -60,8 +60,9 @@ define i64 @test_floor_si64(float %x) nounwind {
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB1_4:
-; RV32IF-NEXT: lui a1, %hi(.LCPI1_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI1_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB1_6
; RV32IF-NEXT: # %bb.5:
@@ -196,10 +197,11 @@ define i64 @test_floor_ui64(float %x) nounwind {
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI3_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI3_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
@@ -318,8 +320,9 @@ define i64 @test_ceil_si64(float %x) nounwind {
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB5_4:
-; RV32IF-NEXT: lui a1, %hi(.LCPI5_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI5_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB5_6
; RV32IF-NEXT: # %bb.5:
@@ -454,10 +457,11 @@ define i64 @test_ceil_ui64(float %x) nounwind {
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI7_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI7_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
@@ -576,8 +580,9 @@ define i64 @test_trunc_si64(float %x) nounwind {
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB9_4:
-; RV32IF-NEXT: lui a1, %hi(.LCPI9_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI9_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB9_6
; RV32IF-NEXT: # %bb.5:
@@ -712,10 +717,11 @@ define i64 @test_trunc_ui64(float %x) nounwind {
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI11_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI11_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
@@ -834,8 +840,9 @@ define i64 @test_round_si64(float %x) nounwind {
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB13_4:
-; RV32IF-NEXT: lui a1, %hi(.LCPI13_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI13_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB13_6
; RV32IF-NEXT: # %bb.5:
@@ -970,10 +977,11 @@ define i64 @test_round_ui64(float %x) nounwind {
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI15_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI15_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
@@ -1092,8 +1100,9 @@ define i64 @test_roundeven_si64(float %x) nounwind {
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB17_4:
-; RV32IF-NEXT: lui a1, %hi(.LCPI17_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI17_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB17_6
; RV32IF-NEXT: # %bb.5:
@@ -1228,10 +1237,11 @@ define i64 @test_roundeven_ui64(float %x) nounwind {
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI19_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI19_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
@@ -1350,8 +1360,9 @@ define i64 @test_rint_si64(float %x) nounwind {
; RV32IF-NEXT: # %bb.3:
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: .LBB21_4:
-; RV32IF-NEXT: lui a1, %hi(.LCPI21_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI21_0)(a1)
+; RV32IF-NEXT: lui a1, 389120
+; RV32IF-NEXT: addi a1, a1, -1
+; RV32IF-NEXT: fmv.w.x fa5, a1
; RV32IF-NEXT: flt.s a1, fa5, fs0
; RV32IF-NEXT: beqz a1, .LBB21_6
; RV32IF-NEXT: # %bb.5:
@@ -1486,10 +1497,11 @@ define i64 @test_rint_ui64(float %x) nounwind {
; RV32IF-NEXT: neg s0, a0
; RV32IF-NEXT: fmv.s fa0, fs0
; RV32IF-NEXT: call __fixunssfdi
-; RV32IF-NEXT: lui a2, %hi(.LCPI23_0)
-; RV32IF-NEXT: flw fa5, %lo(.LCPI23_0)(a2)
; RV32IF-NEXT: and a0, s0, a0
+; RV32IF-NEXT: lui a2, 391168
; RV32IF-NEXT: and a1, s0, a1
+; RV32IF-NEXT: addi a2, a2, -1
+; RV32IF-NEXT: fmv.w.x fa5, a2
; RV32IF-NEXT: flt.s a2, fa5, fs0
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a2, a0
diff --git a/llvm/test/CodeGen/RISCV/half-arith.ll b/llvm/test/CodeGen/RISCV/half-arith.ll
index 84163b52bb98d..2ebb6e9b97a4d 100644
--- a/llvm/test/CodeGen/RISCV/half-arith.ll
+++ b/llvm/test/CodeGen/RISCV/half-arith.ll
@@ -2883,39 +2883,20 @@ define half @fsgnjx_f16(half %x, half %y) nounwind {
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
;
-; RV32IZFHMIN-LABEL: fsgnjx_f16:
-; RV32IZFHMIN: # %bb.0:
-; RV32IZFHMIN-NEXT: lui a0, %hi(.LCPI23_0)
-; RV32IZFHMIN-NEXT: lhu a0, %lo(.LCPI23_0)(a0)
-; RV32IZFHMIN-NEXT: fmv.x.h a1, fa0
-; RV32IZFHMIN-NEXT: lui a2, 1048568
-; RV32IZFHMIN-NEXT: and a1, a1, a2
-; RV32IZFHMIN-NEXT: slli a0, a0, 17
-; RV32IZFHMIN-NEXT: srli a0, a0, 17
-; RV32IZFHMIN-NEXT: or a0, a0, a1
-; RV32IZFHMIN-NEXT: fmv.h.x fa5, a0
-; RV32IZFHMIN-NEXT: fcvt.s.h fa5, fa5
-; RV32IZFHMIN-NEXT: fcvt.s.h fa4, fa1
-; RV32IZFHMIN-NEXT: fmul.s fa5, fa5, fa4
-; RV32IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV32IZFHMIN-NEXT: ret
-;
-; RV64IZFHMIN-LABEL: fsgnjx_f16:
-; RV64IZFHMIN: # %bb.0:
-; RV64IZFHMIN-NEXT: lui a0, %hi(.LCPI23_0)
-; RV64IZFHMIN-NEXT: lhu a0, %lo(.LCPI23_0)(a0)
-; RV64IZFHMIN-NEXT: fmv.x.h a1, fa0
-; RV64IZFHMIN-NEXT: lui a2, 1048568
-; RV64IZFHMIN-NEXT: and a1, a1, a2
-; RV64IZFHMIN-NEXT: slli a0, a0, 49
-; RV64IZFHMIN-NEXT: srli a0, a0, 49
-; RV64IZFHMIN-NEXT: or a0, a0, a1
-; RV64IZFHMIN-NEXT: fmv.h.x fa5, a0
-; RV64IZFHMIN-NEXT: fcvt.s.h fa5, fa5
-; RV64IZFHMIN-NEXT: fcvt.s.h fa4, fa1
-; RV64IZFHMIN-NEXT: fmul.s fa5, fa5, fa4
-; RV64IZFHMIN-NEXT: fcvt.h.s fa0, fa5
-; RV64IZFHMIN-NEXT: ret
+; CHECKIZFHMIN-LABEL: fsgnjx_f16:
+; CHECKIZFHMIN: # %bb.0:
+; CHECKIZFHMIN-NEXT: fmv.x.h a0, fa0
+; CHECKIZFHMIN-NEXT: lui a1, 1048568
+; CHECKIZFHMIN-NEXT: and a0, a0, a1
+; CHECKIZFHMIN-NEXT: li a1, 15
+; CHECKIZFHMIN-NEXT: slli a1, a1, 10
+; CHECKIZFHMIN-NEXT: or a0, a0, a1
+; CHECKIZFHMIN-NEXT: fmv.h.x fa5, a0
+; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa5
+; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa1
+; CHECKIZFHMIN-NEXT: fmul.s fa5, fa5, fa4
+; CHECKIZFHMIN-NEXT: fcvt.h.s fa0, fa5
+; CHECKIZFHMIN-NEXT: ret
;
; CHECKIZHINXMIN-LABEL: fsgnjx_f16:
; CHECKIZHINXMIN: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/half-convert.ll b/llvm/test/CodeGen/RISCV/half-convert.ll
index 6cebf8b2828bf..c3c06e192f76f 100644
--- a/llvm/test/CodeGen/RISCV/half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert.ll
@@ -194,13 +194,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32IZFH-LABEL: fcvt_si_h_sat:
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: fcvt.s.h fa5, fa0
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32IZFH-NEXT: feq.s a1, fa5, fa5
-; RV32IZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; RV32IZFH-NEXT: lui a0, 815104
-; RV32IZFH-NEXT: fmv.w.x fa3, a0
-; RV32IZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV32IZFH-NEXT: neg a0, a1
+; RV32IZFH-NEXT: lui a1, 290816
+; RV32IZFH-NEXT: fmv.w.x fa4, a0
+; RV32IZFH-NEXT: feq.s a0, fa5, fa5
+; RV32IZFH-NEXT: addi a1, a1, -512
+; RV32IZFH-NEXT: neg a0, a0
+; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IZFH-NEXT: fmv.w.x fa4, a1
; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IZFH-NEXT: fcvt.w.s a1, fa5, rtz
; RV32IZFH-NEXT: and a0, a0, a1
@@ -209,13 +210,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64IZFH-LABEL: fcvt_si_h_sat:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: fcvt.s.h fa5, fa0
-; RV64IZFH-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64IZFH-NEXT: feq.s a1, fa5, fa5
-; RV64IZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; RV64IZFH-NEXT: lui a0, 815104
-; RV64IZFH-NEXT: fmv.w.x fa3, a0
-; RV64IZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV64IZFH-NEXT: neg a0, a1
+; RV64IZFH-NEXT: lui a1, 290816
+; RV64IZFH-NEXT: fmv.w.x fa4, a0
+; RV64IZFH-NEXT: feq.s a0, fa5, fa5
+; RV64IZFH-NEXT: addi a1, a1, -512
+; RV64IZFH-NEXT: neg a0, a0
+; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IZFH-NEXT: fmv.w.x fa4, a1
; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IZFH-NEXT: fcvt.l.s a1, fa5, rtz
; RV64IZFH-NEXT: and a0, a0, a1
@@ -224,13 +226,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32IDZFH-LABEL: fcvt_si_h_sat:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0
-; RV32IDZFH-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32IDZFH-NEXT: feq.s a1, fa5, fa5
-; RV32IDZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; RV32IDZFH-NEXT: lui a0, 815104
-; RV32IDZFH-NEXT: fmv.w.x fa3, a0
-; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV32IDZFH-NEXT: neg a0, a1
+; RV32IDZFH-NEXT: lui a1, 290816
+; RV32IDZFH-NEXT: fmv.w.x fa4, a0
+; RV32IDZFH-NEXT: feq.s a0, fa5, fa5
+; RV32IDZFH-NEXT: addi a1, a1, -512
+; RV32IDZFH-NEXT: neg a0, a0
+; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IDZFH-NEXT: fmv.w.x fa4, a1
; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IDZFH-NEXT: fcvt.w.s a1, fa5, rtz
; RV32IDZFH-NEXT: and a0, a0, a1
@@ -239,13 +242,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64IDZFH-LABEL: fcvt_si_h_sat:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0
-; RV64IDZFH-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64IDZFH-NEXT: feq.s a1, fa5, fa5
-; RV64IDZFH-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; RV64IDZFH-NEXT: lui a0, 815104
-; RV64IDZFH-NEXT: fmv.w.x fa3, a0
-; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV64IDZFH-NEXT: neg a0, a1
+; RV64IDZFH-NEXT: lui a1, 290816
+; RV64IDZFH-NEXT: fmv.w.x fa4, a0
+; RV64IDZFH-NEXT: feq.s a0, fa5, fa5
+; RV64IDZFH-NEXT: addi a1, a1, -512
+; RV64IDZFH-NEXT: neg a0, a0
+; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IDZFH-NEXT: fmv.w.x fa4, a1
; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IDZFH-NEXT: fcvt.l.s a1, fa5, rtz
; RV64IDZFH-NEXT: and a0, a0, a1
@@ -399,13 +403,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
-; RV32ID-ILP32-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
-; RV32ID-ILP32-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; RV32ID-ILP32-NEXT: lui a0, 815104
-; RV32ID-ILP32-NEXT: fmv.w.x fa3, a0
-; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa3
-; RV32ID-ILP32-NEXT: neg a0, a1
+; RV32ID-ILP32-NEXT: lui a1, 290816
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
+; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5
+; RV32ID-ILP32-NEXT: addi a1, a1, -512
+; RV32ID-ILP32-NEXT: neg a0, a0
+; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1
; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-ILP32-NEXT: fcvt.w.s a1, fa5, rtz
; RV32ID-ILP32-NEXT: and a0, a0, a1
@@ -419,13 +424,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
-; RV64ID-LP64-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
-; RV64ID-LP64-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; RV64ID-LP64-NEXT: lui a0, 815104
-; RV64ID-LP64-NEXT: fmv.w.x fa3, a0
-; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa3
-; RV64ID-LP64-NEXT: neg a0, a1
+; RV64ID-LP64-NEXT: lui a1, 290816
+; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
+; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5
+; RV64ID-LP64-NEXT: addi a1, a1, -512
+; RV64ID-LP64-NEXT: neg a0, a0
+; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4
+; RV64ID-LP64-NEXT: fmv.w.x fa4, a1
; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-LP64-NEXT: fcvt.l.s a1, fa5, rtz
; RV64ID-LP64-NEXT: and a0, a0, a1
@@ -439,13 +445,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: feq.s a0, fa0, fa0
-; RV32ID-NEXT: lui a1, %hi(.LCPI1_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI1_0)(a1)
; RV32ID-NEXT: lui a1, 815104
-; RV32ID-NEXT: fmv.w.x fa4, a1
-; RV32ID-NEXT: fmax.s fa4, fa0, fa4
+; RV32ID-NEXT: fmv.w.x fa5, a1
+; RV32ID-NEXT: lui a1, 290816
; RV32ID-NEXT: neg a0, a0
-; RV32ID-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-NEXT: addi a1, a1, -512
+; RV32ID-NEXT: fmax.s fa5, fa0, fa5
+; RV32ID-NEXT: fmv.w.x fa4, a1
+; RV32ID-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-NEXT: fcvt.w.s a1, fa5, rtz
; RV32ID-NEXT: and a0, a0, a1
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -458,13 +465,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: feq.s a0, fa0, fa0
-; RV64ID-NEXT: lui a1, %hi(.LCPI1_0)
-; RV64ID-NEXT: flw fa5, %lo(.LCPI1_0)(a1)
; RV64ID-NEXT: lui a1, 815104
-; RV64ID-NEXT: fmv.w.x fa4, a1
-; RV64ID-NEXT: fmax.s fa4, fa0, fa4
+; RV64ID-NEXT: fmv.w.x fa5, a1
+; RV64ID-NEXT: lui a1, 290816
; RV64ID-NEXT: neg a0, a0
-; RV64ID-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-NEXT: addi a1, a1, -512
+; RV64ID-NEXT: fmax.s fa5, fa0, fa5
+; RV64ID-NEXT: fmv.w.x fa4, a1
+; RV64ID-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-NEXT: fcvt.l.s a1, fa5, rtz
; RV64ID-NEXT: and a0, a0, a1
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -474,13 +482,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; CHECK32-IZFHMIN-LABEL: fcvt_si_h_sat:
; CHECK32-IZFHMIN: # %bb.0: # %start
; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
-; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK32-IZFHMIN-NEXT: feq.s a1, fa5, fa5
-; CHECK32-IZFHMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; CHECK32-IZFHMIN-NEXT: lui a0, 815104
-; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, a0
-; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3
-; CHECK32-IZFHMIN-NEXT: neg a0, a1
+; CHECK32-IZFHMIN-NEXT: lui a1, 290816
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK32-IZFHMIN-NEXT: feq.s a0, fa5, fa5
+; CHECK32-IZFHMIN-NEXT: addi a1, a1, -512
+; CHECK32-IZFHMIN-NEXT: neg a0, a0
+; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a1
; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK32-IZFHMIN-NEXT: fcvt.w.s a1, fa5, rtz
; CHECK32-IZFHMIN-NEXT: and a0, a0, a1
@@ -489,13 +498,14 @@ define i16 @fcvt_si_h_sat(half %a) nounwind {
; CHECK64-IZFHMIN-LABEL: fcvt_si_h_sat:
; CHECK64-IZFHMIN: # %bb.0: # %start
; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
-; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK64-IZFHMIN-NEXT: feq.s a1, fa5, fa5
-; CHECK64-IZFHMIN-NEXT: flw fa4, %lo(.LCPI1_0)(a0)
; CHECK64-IZFHMIN-NEXT: lui a0, 815104
-; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, a0
-; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3
-; CHECK64-IZFHMIN-NEXT: neg a0, a1
+; CHECK64-IZFHMIN-NEXT: lui a1, 290816
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK64-IZFHMIN-NEXT: feq.s a0, fa5, fa5
+; CHECK64-IZFHMIN-NEXT: addi a1, a1, -512
+; CHECK64-IZFHMIN-NEXT: neg a0, a0
+; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a1
; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz
; CHECK64-IZFHMIN-NEXT: and a0, a0, a1
@@ -711,45 +721,49 @@ define i16 @fcvt_ui_h(half %a) nounwind {
define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV32IZFH-LABEL: fcvt_ui_h_sat:
; RV32IZFH: # %bb.0: # %start
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV32IZFH-NEXT: fcvt.s.h fa4, fa0
-; RV32IZFH-NEXT: fmv.w.x fa3, zero
-; RV32IZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV32IZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV32IZFH-NEXT: fcvt.s.h fa5, fa0
+; RV32IZFH-NEXT: fmv.w.x fa4, zero
+; RV32IZFH-NEXT: lui a0, 292864
+; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IZFH-NEXT: addi a0, a0, -256
+; RV32IZFH-NEXT: fmv.w.x fa4, a0
+; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IZFH-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_ui_h_sat:
; RV64IZFH: # %bb.0: # %start
-; RV64IZFH-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64IZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV64IZFH-NEXT: fcvt.s.h fa4, fa0
-; RV64IZFH-NEXT: fmv.w.x fa3, zero
-; RV64IZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV64IZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV64IZFH-NEXT: fcvt.s.h fa5, fa0
+; RV64IZFH-NEXT: fmv.w.x fa4, zero
+; RV64IZFH-NEXT: lui a0, 292864
+; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IZFH-NEXT: addi a0, a0, -256
+; RV64IZFH-NEXT: fmv.w.x fa4, a0
+; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IZFH-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_ui_h_sat:
; RV32IDZFH: # %bb.0: # %start
-; RV32IDZFH-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV32IDZFH-NEXT: fcvt.s.h fa4, fa0
-; RV32IDZFH-NEXT: fmv.w.x fa3, zero
-; RV32IDZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV32IDZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0
+; RV32IDZFH-NEXT: fmv.w.x fa4, zero
+; RV32IDZFH-NEXT: lui a0, 292864
+; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IDZFH-NEXT: addi a0, a0, -256
+; RV32IDZFH-NEXT: fmv.w.x fa4, a0
+; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IDZFH-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_ui_h_sat:
; RV64IDZFH: # %bb.0: # %start
-; RV64IDZFH-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64IDZFH-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV64IDZFH-NEXT: fcvt.s.h fa4, fa0
-; RV64IDZFH-NEXT: fmv.w.x fa3, zero
-; RV64IDZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV64IDZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0
+; RV64IDZFH-NEXT: fmv.w.x fa4, zero
+; RV64IDZFH-NEXT: lui a0, 292864
+; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IDZFH-NEXT: addi a0, a0, -256
+; RV64IDZFH-NEXT: fmv.w.x fa4, a0
+; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IDZFH-NEXT: ret
;
@@ -874,12 +888,13 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI3_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI3_0)(a1)
+; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero
+; RV32ID-ILP32-NEXT: lui a0, 292864
+; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4
+; RV32ID-ILP32-NEXT: addi a0, a0, -256
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
-; RV32ID-ILP32-NEXT: fmv.w.x fa3, zero
-; RV32ID-ILP32-NEXT: fmax.s fa4, fa4, fa3
-; RV32ID-ILP32-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-ILP32-NEXT: addi sp, sp, 16
@@ -890,12 +905,13 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: call __extendhfsf2
-; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI3_0)
-; RV64ID-LP64-NEXT: flw fa5, %lo(.LCPI3_0)(a1)
+; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
+; RV64ID-LP64-NEXT: fmv.w.x fa4, zero
+; RV64ID-LP64-NEXT: lui a0, 292864
+; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4
+; RV64ID-LP64-NEXT: addi a0, a0, -256
; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
-; RV64ID-LP64-NEXT: fmv.w.x fa3, zero
-; RV64ID-LP64-NEXT: fmax.s fa4, fa4, fa3
-; RV64ID-LP64-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-LP64-NEXT: addi sp, sp, 16
@@ -906,11 +922,12 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: call __extendhfsf2
-; RV32ID-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV32ID-NEXT: fmv.w.x fa4, zero
-; RV32ID-NEXT: fmax.s fa4, fa0, fa4
-; RV32ID-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-NEXT: fmv.w.x fa5, zero
+; RV32ID-NEXT: lui a0, 292864
+; RV32ID-NEXT: fmax.s fa5, fa0, fa5
+; RV32ID-NEXT: addi a0, a0, -256
+; RV32ID-NEXT: fmv.w.x fa4, a0
+; RV32ID-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -921,11 +938,12 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: call __extendhfsf2
-; RV64ID-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64ID-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; RV64ID-NEXT: fmv.w.x fa4, zero
-; RV64ID-NEXT: fmax.s fa4, fa0, fa4
-; RV64ID-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-NEXT: fmv.w.x fa5, zero
+; RV64ID-NEXT: lui a0, 292864
+; RV64ID-NEXT: fmax.s fa5, fa0, fa5
+; RV64ID-NEXT: addi a0, a0, -256
+; RV64ID-NEXT: fmv.w.x fa4, a0
+; RV64ID-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -933,23 +951,25 @@ define i16 @fcvt_ui_h_sat(half %a) nounwind {
;
; CHECK32-IZFHMIN-LABEL: fcvt_ui_h_sat:
; CHECK32-IZFHMIN: # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK32-IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa4, fa0
-; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, zero
-; CHECK32-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3
-; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5
+; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, zero
+; CHECK32-IZFHMIN-NEXT: lui a0, 292864
+; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK32-IZFHMIN-NEXT: addi a0, a0, -256
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK32-IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz
; CHECK32-IZFHMIN-NEXT: ret
;
; CHECK64-IZFHMIN-LABEL: fcvt_ui_h_sat:
; CHECK64-IZFHMIN: # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK64-IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a0)
-; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa4, fa0
-; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, zero
-; CHECK64-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3
-; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5
+; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, zero
+; CHECK64-IZFHMIN-NEXT: lui a0, 292864
+; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK64-IZFHMIN-NEXT: addi a0, a0, -256
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz
; CHECK64-IZFHMIN-NEXT: ret
;
@@ -2159,20 +2179,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IZFH-NEXT: # %bb.1: # %start
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB10_2: # %start
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB10_4
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: addi a2, a3, -1
; RV32IZFH-NEXT: .LBB10_4: # %start
; RV32IZFH-NEXT: feq.s a3, fs0, fs0
-; RV32IZFH-NEXT: neg a4, a1
-; RV32IZFH-NEXT: neg a1, s0
+; RV32IZFH-NEXT: neg a4, s0
+; RV32IZFH-NEXT: neg a5, a1
; RV32IZFH-NEXT: neg a3, a3
-; RV32IZFH-NEXT: and a0, a1, a0
+; RV32IZFH-NEXT: and a0, a4, a0
; RV32IZFH-NEXT: and a1, a3, a2
-; RV32IZFH-NEXT: or a0, a4, a0
+; RV32IZFH-NEXT: or a0, a5, a0
; RV32IZFH-NEXT: and a0, a3, a0
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2207,20 +2228,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IDZFH-NEXT: # %bb.1: # %start
; RV32IDZFH-NEXT: mv a2, a1
; RV32IDZFH-NEXT: .LBB10_2: # %start
-; RV32IDZFH-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32IDZFH-NEXT: lui a1, 389120
+; RV32IDZFH-NEXT: addi a1, a1, -1
+; RV32IDZFH-NEXT: fmv.w.x fa5, a1
; RV32IDZFH-NEXT: flt.s a1, fa5, fs0
; RV32IDZFH-NEXT: beqz a1, .LBB10_4
; RV32IDZFH-NEXT: # %bb.3:
; RV32IDZFH-NEXT: addi a2, a3, -1
; RV32IDZFH-NEXT: .LBB10_4: # %start
; RV32IDZFH-NEXT: feq.s a3, fs0, fs0
-; RV32IDZFH-NEXT: neg a4, a1
-; RV32IDZFH-NEXT: neg a1, s0
+; RV32IDZFH-NEXT: neg a4, s0
+; RV32IDZFH-NEXT: neg a5, a1
; RV32IDZFH-NEXT: neg a3, a3
-; RV32IDZFH-NEXT: and a0, a1, a0
+; RV32IDZFH-NEXT: and a0, a4, a0
; RV32IDZFH-NEXT: and a1, a3, a2
-; RV32IDZFH-NEXT: or a0, a4, a0
+; RV32IDZFH-NEXT: or a0, a5, a0
; RV32IDZFH-NEXT: and a0, a3, a0
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2450,8 +2472,9 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: # %bb.1: # %start
; RV32ID-ILP32-NEXT: mv a2, a1
; RV32ID-ILP32-NEXT: .LBB10_2: # %start
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32ID-ILP32-NEXT: lui a1, 389120
+; RV32ID-ILP32-NEXT: addi a1, a1, -1
+; RV32ID-ILP32-NEXT: fmv.w.x fa5, a1
; RV32ID-ILP32-NEXT: flw fa4, 4(sp) # 4-byte Folded Reload
; RV32ID-ILP32-NEXT: flt.s a1, fa5, fa4
; RV32ID-ILP32-NEXT: fmv.s fa5, fa4
@@ -2505,8 +2528,9 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32ID-NEXT: # %bb.1: # %start
; RV32ID-NEXT: mv a2, a1
; RV32ID-NEXT: .LBB10_2: # %start
-; RV32ID-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32ID-NEXT: lui a1, 389120
+; RV32ID-NEXT: addi a1, a1, -1
+; RV32ID-NEXT: fmv.w.x fa5, a1
; RV32ID-NEXT: flt.s a1, fa5, fs0
; RV32ID-NEXT: beqz a1, .LBB10_4
; RV32ID-NEXT: # %bb.3:
@@ -2558,20 +2582,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IFZFHMIN-NEXT: # %bb.1: # %start
; RV32IFZFHMIN-NEXT: mv a2, a1
; RV32IFZFHMIN-NEXT: .LBB10_2: # %start
-; RV32IFZFHMIN-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32IFZFHMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32IFZFHMIN-NEXT: lui a1, 389120
+; RV32IFZFHMIN-NEXT: addi a1, a1, -1
+; RV32IFZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IFZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IFZFHMIN-NEXT: beqz a1, .LBB10_4
; RV32IFZFHMIN-NEXT: # %bb.3:
; RV32IFZFHMIN-NEXT: addi a2, a3, -1
; RV32IFZFHMIN-NEXT: .LBB10_4: # %start
; RV32IFZFHMIN-NEXT: feq.s a3, fs0, fs0
-; RV32IFZFHMIN-NEXT: neg a4, a1
-; RV32IFZFHMIN-NEXT: neg a1, s0
+; RV32IFZFHMIN-NEXT: neg a4, s0
+; RV32IFZFHMIN-NEXT: neg a5, a1
; RV32IFZFHMIN-NEXT: neg a3, a3
-; RV32IFZFHMIN-NEXT: and a0, a1, a0
+; RV32IFZFHMIN-NEXT: and a0, a4, a0
; RV32IFZFHMIN-NEXT: and a1, a3, a2
-; RV32IFZFHMIN-NEXT: or a0, a4, a0
+; RV32IFZFHMIN-NEXT: or a0, a5, a0
; RV32IFZFHMIN-NEXT: and a0, a3, a0
; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2607,20 +2632,21 @@ define i64 @fcvt_l_h_sat(half %a) nounwind {
; RV32IDZFHMIN-NEXT: # %bb.1: # %start
; RV32IDZFHMIN-NEXT: mv a2, a1
; RV32IDZFHMIN-NEXT: .LBB10_2: # %start
-; RV32IDZFHMIN-NEXT: lui a1, %hi(.LCPI10_0)
-; RV32IDZFHMIN-NEXT: flw fa5, %lo(.LCPI10_0)(a1)
+; RV32IDZFHMIN-NEXT: lui a1, 389120
+; RV32IDZFHMIN-NEXT: addi a1, a1, -1
+; RV32IDZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IDZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IDZFHMIN-NEXT: beqz a1, .LBB10_4
; RV32IDZFHMIN-NEXT: # %bb.3:
; RV32IDZFHMIN-NEXT: addi a2, a3, -1
; RV32IDZFHMIN-NEXT: .LBB10_4: # %start
; RV32IDZFHMIN-NEXT: feq.s a3, fs0, fs0
-; RV32IDZFHMIN-NEXT: neg a4, a1
-; RV32IDZFHMIN-NEXT: neg a1, s0
+; RV32IDZFHMIN-NEXT: neg a4, s0
+; RV32IDZFHMIN-NEXT: neg a5, a1
; RV32IDZFHMIN-NEXT: neg a3, a3
-; RV32IDZFHMIN-NEXT: and a0, a1, a0
+; RV32IDZFHMIN-NEXT: and a0, a4, a0
; RV32IDZFHMIN-NEXT: and a1, a3, a2
-; RV32IDZFHMIN-NEXT: or a0, a4, a0
+; RV32IDZFHMIN-NEXT: or a0, a5, a0
; RV32IDZFHMIN-NEXT: and a0, a3, a0
; RV32IDZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -2903,23 +2929,25 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IZFH-NEXT: addi sp, sp, -16
; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI12_0)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
-; RV32IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IZFH-NEXT: fmv.w.x fa4, zero
-; RV32IZFH-NEXT: fle.s a0, fa4, fa0
-; RV32IZFH-NEXT: flt.s a1, fa5, fa0
-; RV32IZFH-NEXT: neg s0, a1
-; RV32IZFH-NEXT: neg s1, a0
+; RV32IZFH-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV32IZFH-NEXT: fcvt.s.h fs0, fa0
+; RV32IZFH-NEXT: fmv.w.x fa5, zero
+; RV32IZFH-NEXT: fle.s a0, fa5, fs0
+; RV32IZFH-NEXT: neg s0, a0
+; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: and a0, s1, a0
-; RV32IZFH-NEXT: and a1, s1, a1
-; RV32IZFH-NEXT: or a0, s0, a0
-; RV32IZFH-NEXT: or a1, s0, a1
+; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
+; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
+; RV32IZFH-NEXT: flt.s a2, fa5, fs0
+; RV32IZFH-NEXT: neg a2, a2
+; RV32IZFH-NEXT: or a0, a2, a0
+; RV32IZFH-NEXT: or a1, a2, a1
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; RV32IZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
;
@@ -2937,23 +2965,25 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32IDZFH-NEXT: addi sp, sp, -16
; RV32IDZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IDZFH-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; RV32IDZFH-NEXT: lui a0, %hi(.LCPI12_0)
-; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
-; RV32IDZFH-NEXT: fcvt.s.h fa0, fa0
-; RV32IDZFH-NEXT: fmv.w.x fa4, zero
-; RV32IDZFH-NEXT: fle.s a0, fa4, fa0
-; RV32IDZFH-NEXT: flt.s a1, fa5, fa0
-; RV32IDZFH-NEXT: neg s0, a1
-; RV32IDZFH-NEXT: neg s1, a0
+; RV32IDZFH-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
+; RV32IDZFH-NEXT: fcvt.s.h fs0, fa0
+; RV32IDZFH-NEXT: fmv.w.x fa5, zero
+; RV32IDZFH-NEXT: fle.s a0, fa5, fs0
+; RV32IDZFH-NEXT: neg s0, a0
+; RV32IDZFH-NEXT: fmv.s fa0, fs0
; RV32IDZFH-NEXT: call __fixunssfdi
-; RV32IDZFH-NEXT: and a0, s1, a0
-; RV32IDZFH-NEXT: and a1, s1, a1
-; RV32IDZFH-NEXT: or a0, s0, a0
-; RV32IDZFH-NEXT: or a1, s0, a1
+; RV32IDZFH-NEXT: and a0, s0, a0
+; RV32IDZFH-NEXT: lui a2, 391168
+; RV32IDZFH-NEXT: and a1, s0, a1
+; RV32IDZFH-NEXT: addi a2, a2, -1
+; RV32IDZFH-NEXT: fmv.w.x fa5, a2
+; RV32IDZFH-NEXT: flt.s a2, fa5, fs0
+; RV32IDZFH-NEXT: neg a2, a2
+; RV32IDZFH-NEXT: or a0, a2, a0
+; RV32IDZFH-NEXT: or a1, a2, a1
; RV32IDZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IDZFH-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; RV32IDZFH-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32IDZFH-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IDZFH-NEXT: addi sp, sp, 16
; RV32IDZFH-NEXT: ret
;
@@ -3105,14 +3135,15 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32ID-ILP32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI12_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI12_0)(a1)
-; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
-; RV32ID-ILP32-NEXT: fmv.w.x fa3, zero
-; RV32ID-ILP32-NEXT: fle.s a1, fa3, fa4
-; RV32ID-ILP32-NEXT: flt.s a2, fa5, fa4
-; RV32ID-ILP32-NEXT: neg s0, a2
-; RV32ID-ILP32-NEXT: neg s1, a1
+; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
+; RV32ID-ILP32-NEXT: lui a1, 391168
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero
+; RV32ID-ILP32-NEXT: addi a1, a1, -1
+; RV32ID-ILP32-NEXT: fle.s a2, fa4, fa5
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1
+; RV32ID-ILP32-NEXT: flt.s a1, fa4, fa5
+; RV32ID-ILP32-NEXT: neg s0, a1
+; RV32ID-ILP32-NEXT: neg s1, a2
; RV32ID-ILP32-NEXT: call __fixunssfdi
; RV32ID-ILP32-NEXT: and a0, s1, a0
; RV32ID-ILP32-NEXT: and a1, s1, a1
@@ -3144,23 +3175,25 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; RV32ID-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
+; RV32ID-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32ID-NEXT: call __extendhfsf2
-; RV32ID-NEXT: lui a0, %hi(.LCPI12_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
-; RV32ID-NEXT: fmv.w.x fa4, zero
-; RV32ID-NEXT: fle.s a0, fa4, fa0
-; RV32ID-NEXT: flt.s a1, fa5, fa0
-; RV32ID-NEXT: neg s0, a1
-; RV32ID-NEXT: neg s1, a0
+; RV32ID-NEXT: fmv.s fs0, fa0
+; RV32ID-NEXT: fmv.w.x fa5, zero
+; RV32ID-NEXT: fle.s a0, fa5, fa0
+; RV32ID-NEXT: neg s0, a0
; RV32ID-NEXT: call __fixunssfdi
-; RV32ID-NEXT: and a0, s1, a0
-; RV32ID-NEXT: and a1, s1, a1
-; RV32ID-NEXT: or a0, s0, a0
-; RV32ID-NEXT: or a1, s0, a1
+; RV32ID-NEXT: and a0, s0, a0
+; RV32ID-NEXT: lui a2, 391168
+; RV32ID-NEXT: and a1, s0, a1
+; RV32ID-NEXT: addi a2, a2, -1
+; RV32ID-NEXT: fmv.w.x fa5, a2
+; RV32ID-NEXT: flt.s a2, fa5, fs0
+; RV32ID-NEXT: neg a2, a2
+; RV32ID-NEXT: or a0, a2, a0
+; RV32ID-NEXT: or a1, a2, a1
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; RV32ID-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
+; RV32ID-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
; RV32ID-NEXT: ret
;
@@ -3178,30 +3211,32 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; RV64ID-NEXT: addi sp, sp, 16
; RV64ID-NEXT: ret
;
-; CHECK32-IZFHMIN-LABEL: fcvt_lu_h_sat:
-; CHECK32-IZFHMIN: # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT: addi sp, sp, -16
-; CHECK32-IZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
-; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK32-IZFHMIN-NEXT: flw fa5, %lo(.LCPI12_0)(a0)
-; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa0, fa0
-; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, zero
-; CHECK32-IZFHMIN-NEXT: fle.s a0, fa4, fa0
-; CHECK32-IZFHMIN-NEXT: flt.s a1, fa5, fa0
-; CHECK32-IZFHMIN-NEXT: neg s0, a1
-; CHECK32-IZFHMIN-NEXT: neg s1, a0
-; CHECK32-IZFHMIN-NEXT: call __fixunssfdi
-; CHECK32-IZFHMIN-NEXT: and a0, s1, a0
-; CHECK32-IZFHMIN-NEXT: and a1, s1, a1
-; CHECK32-IZFHMIN-NEXT: or a0, s0, a0
-; CHECK32-IZFHMIN-NEXT: or a1, s0, a1
-; CHECK32-IZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; CHECK32-IZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
-; CHECK32-IZFHMIN-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
-; CHECK32-IZFHMIN-NEXT: addi sp, sp, 16
-; CHECK32-IZFHMIN-NEXT: ret
+; RV32IFZFHMIN-LABEL: fcvt_lu_h_sat:
+; RV32IFZFHMIN: # %bb.0: # %start
+; RV32IFZFHMIN-NEXT: addi sp, sp, -16
+; RV32IFZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IFZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IFZFHMIN-NEXT: fsw fs0, 4(sp) # 4-byte Folded Spill
+; RV32IFZFHMIN-NEXT: fcvt.s.h fs0, fa0
+; RV32IFZFHMIN-NEXT: fmv.w.x fa5, zero
+; RV32IFZFHMIN-NEXT: fle.s a0, fa5, fs0
+; RV32IFZFHMIN-NEXT: neg s0, a0
+; RV32IFZFHMIN-NEXT: fmv.s fa0, fs0
+; RV32IFZFHMIN-NEXT: call __fixunssfdi
+; RV32IFZFHMIN-NEXT: and a0, s0, a0
+; RV32IFZFHMIN-NEXT: lui a2, 391168
+; RV32IFZFHMIN-NEXT: and a1, s0, a1
+; RV32IFZFHMIN-NEXT: addi a2, a2, -1
+; RV32IFZFHMIN-NEXT: fmv.w.x fa5, a2
+; RV32IFZFHMIN-NEXT: flt.s a2, fa5, fs0
+; RV32IFZFHMIN-NEXT: neg a2, a2
+; RV32IFZFHMIN-NEXT: or a0, a2, a0
+; RV32IFZFHMIN-NEXT: or a1, a2, a1
+; RV32IFZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IFZFHMIN-NEXT: flw fs0, 4(sp) # 4-byte Folded Reload
+; RV32IFZFHMIN-NEXT: addi sp, sp, 16
+; RV32IFZFHMIN-NEXT: ret
;
; CHECK64-IZFHMIN-LABEL: fcvt_lu_h_sat:
; CHECK64-IZFHMIN: # %bb.0: # %start
@@ -3213,6 +3248,33 @@ define i64 @fcvt_lu_h_sat(half %a) nounwind {
; CHECK64-IZFHMIN-NEXT: and a0, a1, a0
; CHECK64-IZFHMIN-NEXT: ret
;
+; RV32IDZFHMIN-LABEL: fcvt_lu_h_sat:
+; RV32IDZFHMIN: # %bb.0: # %start
+; RV32IDZFHMIN-NEXT: addi sp, sp, -16
+; RV32IDZFHMIN-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32IDZFHMIN-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
+; RV32IDZFHMIN-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
+; RV32IDZFHMIN-NEXT: fcvt.s.h fs0, fa0
+; RV32IDZFHMIN-NEXT: fmv.w.x fa5, zero
+; RV32IDZFHMIN-NEXT: fle.s a0, fa5, fs0
+; RV32IDZFHMIN-NEXT: neg s0, a0
+; RV32IDZFHMIN-NEXT: fmv.s fa0, fs0
+; RV32IDZFHMIN-NEXT: call __fixunssfdi
+; RV32IDZFHMIN-NEXT: and a0, s0, a0
+; RV32IDZFHMIN-NEXT: lui a2, 391168
+; RV32IDZFHMIN-NEXT: and a1, s0, a1
+; RV32IDZFHMIN-NEXT: addi a2, a2, -1
+; RV32IDZFHMIN-NEXT: fmv.w.x fa5, a2
+; RV32IDZFHMIN-NEXT: flt.s a2, fa5, fs0
+; RV32IDZFHMIN-NEXT: neg a2, a2
+; RV32IDZFHMIN-NEXT: or a0, a2, a0
+; RV32IDZFHMIN-NEXT: or a1, a2, a1
+; RV32IDZFHMIN-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IDZFHMIN-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32IDZFHMIN-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
+; RV32IDZFHMIN-NEXT: addi sp, sp, 16
+; RV32IDZFHMIN-NEXT: ret
+;
; CHECK32-IZHINXMIN-LABEL: fcvt_lu_h_sat:
; CHECK32-IZHINXMIN: # %bb.0: # %start
; CHECK32-IZHINXMIN-NEXT: addi sp, sp, -16
@@ -6282,13 +6344,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32IZFH-LABEL: fcvt_w_s_sat_i16:
; RV32IZFH: # %bb.0: # %start
; RV32IZFH-NEXT: fcvt.s.h fa5, fa0
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI32_0)
-; RV32IZFH-NEXT: feq.s a1, fa5, fa5
-; RV32IZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; RV32IZFH-NEXT: lui a0, 815104
-; RV32IZFH-NEXT: fmv.w.x fa3, a0
-; RV32IZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV32IZFH-NEXT: neg a0, a1
+; RV32IZFH-NEXT: lui a1, 290816
+; RV32IZFH-NEXT: fmv.w.x fa4, a0
+; RV32IZFH-NEXT: feq.s a0, fa5, fa5
+; RV32IZFH-NEXT: addi a1, a1, -512
+; RV32IZFH-NEXT: neg a0, a0
+; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IZFH-NEXT: fmv.w.x fa4, a1
; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IZFH-NEXT: fcvt.w.s a1, fa5, rtz
; RV32IZFH-NEXT: and a0, a0, a1
@@ -6297,13 +6360,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64IZFH-LABEL: fcvt_w_s_sat_i16:
; RV64IZFH: # %bb.0: # %start
; RV64IZFH-NEXT: fcvt.s.h fa5, fa0
-; RV64IZFH-NEXT: lui a0, %hi(.LCPI32_0)
-; RV64IZFH-NEXT: feq.s a1, fa5, fa5
-; RV64IZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; RV64IZFH-NEXT: lui a0, 815104
-; RV64IZFH-NEXT: fmv.w.x fa3, a0
-; RV64IZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV64IZFH-NEXT: neg a0, a1
+; RV64IZFH-NEXT: lui a1, 290816
+; RV64IZFH-NEXT: fmv.w.x fa4, a0
+; RV64IZFH-NEXT: feq.s a0, fa5, fa5
+; RV64IZFH-NEXT: addi a1, a1, -512
+; RV64IZFH-NEXT: neg a0, a0
+; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IZFH-NEXT: fmv.w.x fa4, a1
; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IZFH-NEXT: fcvt.l.s a1, fa5, rtz
; RV64IZFH-NEXT: and a0, a0, a1
@@ -6312,13 +6376,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32IDZFH-LABEL: fcvt_w_s_sat_i16:
; RV32IDZFH: # %bb.0: # %start
; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0
-; RV32IDZFH-NEXT: lui a0, %hi(.LCPI32_0)
-; RV32IDZFH-NEXT: feq.s a1, fa5, fa5
-; RV32IDZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; RV32IDZFH-NEXT: lui a0, 815104
-; RV32IDZFH-NEXT: fmv.w.x fa3, a0
-; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV32IDZFH-NEXT: neg a0, a1
+; RV32IDZFH-NEXT: lui a1, 290816
+; RV32IDZFH-NEXT: fmv.w.x fa4, a0
+; RV32IDZFH-NEXT: feq.s a0, fa5, fa5
+; RV32IDZFH-NEXT: addi a1, a1, -512
+; RV32IDZFH-NEXT: neg a0, a0
+; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IDZFH-NEXT: fmv.w.x fa4, a1
; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IDZFH-NEXT: fcvt.w.s a1, fa5, rtz
; RV32IDZFH-NEXT: and a0, a0, a1
@@ -6327,13 +6392,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64IDZFH-LABEL: fcvt_w_s_sat_i16:
; RV64IDZFH: # %bb.0: # %start
; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0
-; RV64IDZFH-NEXT: lui a0, %hi(.LCPI32_0)
-; RV64IDZFH-NEXT: feq.s a1, fa5, fa5
-; RV64IDZFH-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; RV64IDZFH-NEXT: lui a0, 815104
-; RV64IDZFH-NEXT: fmv.w.x fa3, a0
-; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa3
-; RV64IDZFH-NEXT: neg a0, a1
+; RV64IDZFH-NEXT: lui a1, 290816
+; RV64IDZFH-NEXT: fmv.w.x fa4, a0
+; RV64IDZFH-NEXT: feq.s a0, fa5, fa5
+; RV64IDZFH-NEXT: addi a1, a1, -512
+; RV64IDZFH-NEXT: neg a0, a0
+; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IDZFH-NEXT: fmv.w.x fa4, a1
; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IDZFH-NEXT: fcvt.l.s a1, fa5, rtz
; RV64IDZFH-NEXT: and a0, a0, a1
@@ -6491,13 +6557,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
-; RV32ID-ILP32-NEXT: lui a0, %hi(.LCPI32_0)
-; RV32ID-ILP32-NEXT: feq.s a1, fa5, fa5
-; RV32ID-ILP32-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; RV32ID-ILP32-NEXT: lui a0, 815104
-; RV32ID-ILP32-NEXT: fmv.w.x fa3, a0
-; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa3
-; RV32ID-ILP32-NEXT: neg a0, a1
+; RV32ID-ILP32-NEXT: lui a1, 290816
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
+; RV32ID-ILP32-NEXT: feq.s a0, fa5, fa5
+; RV32ID-ILP32-NEXT: addi a1, a1, -512
+; RV32ID-ILP32-NEXT: neg a0, a0
+; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, a1
; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-ILP32-NEXT: fcvt.w.s a1, fa5, rtz
; RV32ID-ILP32-NEXT: and a0, a0, a1
@@ -6511,13 +6578,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: call __extendhfsf2
; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
-; RV64ID-LP64-NEXT: lui a0, %hi(.LCPI32_0)
-; RV64ID-LP64-NEXT: feq.s a1, fa5, fa5
-; RV64ID-LP64-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; RV64ID-LP64-NEXT: lui a0, 815104
-; RV64ID-LP64-NEXT: fmv.w.x fa3, a0
-; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa3
-; RV64ID-LP64-NEXT: neg a0, a1
+; RV64ID-LP64-NEXT: lui a1, 290816
+; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
+; RV64ID-LP64-NEXT: feq.s a0, fa5, fa5
+; RV64ID-LP64-NEXT: addi a1, a1, -512
+; RV64ID-LP64-NEXT: neg a0, a0
+; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4
+; RV64ID-LP64-NEXT: fmv.w.x fa4, a1
; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-LP64-NEXT: fcvt.l.s a1, fa5, rtz
; RV64ID-LP64-NEXT: and a0, a0, a1
@@ -6531,13 +6599,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: call __extendhfsf2
; RV32ID-NEXT: feq.s a0, fa0, fa0
-; RV32ID-NEXT: lui a1, %hi(.LCPI32_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI32_0)(a1)
; RV32ID-NEXT: lui a1, 815104
-; RV32ID-NEXT: fmv.w.x fa4, a1
-; RV32ID-NEXT: fmax.s fa4, fa0, fa4
+; RV32ID-NEXT: fmv.w.x fa5, a1
+; RV32ID-NEXT: lui a1, 290816
; RV32ID-NEXT: neg a0, a0
-; RV32ID-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-NEXT: addi a1, a1, -512
+; RV32ID-NEXT: fmax.s fa5, fa0, fa5
+; RV32ID-NEXT: fmv.w.x fa4, a1
+; RV32ID-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-NEXT: fcvt.w.s a1, fa5, rtz
; RV32ID-NEXT: and a0, a0, a1
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
@@ -6550,13 +6619,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: call __extendhfsf2
; RV64ID-NEXT: feq.s a0, fa0, fa0
-; RV64ID-NEXT: lui a1, %hi(.LCPI32_0)
-; RV64ID-NEXT: flw fa5, %lo(.LCPI32_0)(a1)
; RV64ID-NEXT: lui a1, 815104
-; RV64ID-NEXT: fmv.w.x fa4, a1
-; RV64ID-NEXT: fmax.s fa4, fa0, fa4
+; RV64ID-NEXT: fmv.w.x fa5, a1
+; RV64ID-NEXT: lui a1, 290816
; RV64ID-NEXT: neg a0, a0
-; RV64ID-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-NEXT: addi a1, a1, -512
+; RV64ID-NEXT: fmax.s fa5, fa0, fa5
+; RV64ID-NEXT: fmv.w.x fa4, a1
+; RV64ID-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-NEXT: fcvt.l.s a1, fa5, rtz
; RV64ID-NEXT: and a0, a0, a1
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
@@ -6566,13 +6636,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; CHECK32-IZFHMIN-LABEL: fcvt_w_s_sat_i16:
; CHECK32-IZFHMIN: # %bb.0: # %start
; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
-; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0)
-; CHECK32-IZFHMIN-NEXT: feq.s a1, fa5, fa5
-; CHECK32-IZFHMIN-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; CHECK32-IZFHMIN-NEXT: lui a0, 815104
-; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, a0
-; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3
-; CHECK32-IZFHMIN-NEXT: neg a0, a1
+; CHECK32-IZFHMIN-NEXT: lui a1, 290816
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK32-IZFHMIN-NEXT: feq.s a0, fa5, fa5
+; CHECK32-IZFHMIN-NEXT: addi a1, a1, -512
+; CHECK32-IZFHMIN-NEXT: neg a0, a0
+; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a1
; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK32-IZFHMIN-NEXT: fcvt.w.s a1, fa5, rtz
; CHECK32-IZFHMIN-NEXT: and a0, a0, a1
@@ -6581,13 +6652,14 @@ define signext i16 @fcvt_w_s_sat_i16(half %a) nounwind {
; CHECK64-IZFHMIN-LABEL: fcvt_w_s_sat_i16:
; CHECK64-IZFHMIN: # %bb.0: # %start
; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
-; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI32_0)
-; CHECK64-IZFHMIN-NEXT: feq.s a1, fa5, fa5
-; CHECK64-IZFHMIN-NEXT: flw fa4, %lo(.LCPI32_0)(a0)
; CHECK64-IZFHMIN-NEXT: lui a0, 815104
-; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, a0
-; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa3
-; CHECK64-IZFHMIN-NEXT: neg a0, a1
+; CHECK64-IZFHMIN-NEXT: lui a1, 290816
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK64-IZFHMIN-NEXT: feq.s a0, fa5, fa5
+; CHECK64-IZFHMIN-NEXT: addi a1, a1, -512
+; CHECK64-IZFHMIN-NEXT: neg a0, a0
+; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a1
; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK64-IZFHMIN-NEXT: fcvt.l.s a1, fa5, rtz
; CHECK64-IZFHMIN-NEXT: and a0, a0, a1
@@ -6802,45 +6874,49 @@ define zeroext i16 @fcvt_wu_s_i16(half %a) nounwind {
define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV32IZFH-LABEL: fcvt_wu_s_sat_i16:
; RV32IZFH: # %bb.0: # %start
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI34_0)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; RV32IZFH-NEXT: fcvt.s.h fa4, fa0
-; RV32IZFH-NEXT: fmv.w.x fa3, zero
-; RV32IZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV32IZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV32IZFH-NEXT: fcvt.s.h fa5, fa0
+; RV32IZFH-NEXT: fmv.w.x fa4, zero
+; RV32IZFH-NEXT: lui a0, 292864
+; RV32IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IZFH-NEXT: addi a0, a0, -256
+; RV32IZFH-NEXT: fmv.w.x fa4, a0
+; RV32IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IZFH-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcvt_wu_s_sat_i16:
; RV64IZFH: # %bb.0: # %start
-; RV64IZFH-NEXT: lui a0, %hi(.LCPI34_0)
-; RV64IZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; RV64IZFH-NEXT: fcvt.s.h fa4, fa0
-; RV64IZFH-NEXT: fmv.w.x fa3, zero
-; RV64IZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV64IZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV64IZFH-NEXT: fcvt.s.h fa5, fa0
+; RV64IZFH-NEXT: fmv.w.x fa4, zero
+; RV64IZFH-NEXT: lui a0, 292864
+; RV64IZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IZFH-NEXT: addi a0, a0, -256
+; RV64IZFH-NEXT: fmv.w.x fa4, a0
+; RV64IZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IZFH-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IZFH-NEXT: ret
;
; RV32IDZFH-LABEL: fcvt_wu_s_sat_i16:
; RV32IDZFH: # %bb.0: # %start
-; RV32IDZFH-NEXT: lui a0, %hi(.LCPI34_0)
-; RV32IDZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; RV32IDZFH-NEXT: fcvt.s.h fa4, fa0
-; RV32IDZFH-NEXT: fmv.w.x fa3, zero
-; RV32IDZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV32IDZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV32IDZFH-NEXT: fcvt.s.h fa5, fa0
+; RV32IDZFH-NEXT: fmv.w.x fa4, zero
+; RV32IDZFH-NEXT: lui a0, 292864
+; RV32IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV32IDZFH-NEXT: addi a0, a0, -256
+; RV32IDZFH-NEXT: fmv.w.x fa4, a0
+; RV32IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV32IDZFH-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32IDZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_wu_s_sat_i16:
; RV64IDZFH: # %bb.0: # %start
-; RV64IDZFH-NEXT: lui a0, %hi(.LCPI34_0)
-; RV64IDZFH-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; RV64IDZFH-NEXT: fcvt.s.h fa4, fa0
-; RV64IDZFH-NEXT: fmv.w.x fa3, zero
-; RV64IDZFH-NEXT: fmax.s fa4, fa4, fa3
-; RV64IDZFH-NEXT: fmin.s fa5, fa4, fa5
+; RV64IDZFH-NEXT: fcvt.s.h fa5, fa0
+; RV64IDZFH-NEXT: fmv.w.x fa4, zero
+; RV64IDZFH-NEXT: lui a0, 292864
+; RV64IDZFH-NEXT: fmax.s fa5, fa5, fa4
+; RV64IDZFH-NEXT: addi a0, a0, -256
+; RV64IDZFH-NEXT: fmv.w.x fa4, a0
+; RV64IDZFH-NEXT: fmin.s fa5, fa5, fa4
; RV64IDZFH-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64IDZFH-NEXT: ret
;
@@ -6971,12 +7047,13 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV32ID-ILP32-NEXT: addi sp, sp, -16
; RV32ID-ILP32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-ILP32-NEXT: call __extendhfsf2
-; RV32ID-ILP32-NEXT: lui a1, %hi(.LCPI34_0)
-; RV32ID-ILP32-NEXT: flw fa5, %lo(.LCPI34_0)(a1)
+; RV32ID-ILP32-NEXT: fmv.w.x fa5, a0
+; RV32ID-ILP32-NEXT: fmv.w.x fa4, zero
+; RV32ID-ILP32-NEXT: lui a0, 292864
+; RV32ID-ILP32-NEXT: fmax.s fa5, fa5, fa4
+; RV32ID-ILP32-NEXT: addi a0, a0, -256
; RV32ID-ILP32-NEXT: fmv.w.x fa4, a0
-; RV32ID-ILP32-NEXT: fmv.w.x fa3, zero
-; RV32ID-ILP32-NEXT: fmax.s fa4, fa4, fa3
-; RV32ID-ILP32-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-ILP32-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-ILP32-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-ILP32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-ILP32-NEXT: addi sp, sp, 16
@@ -6987,12 +7064,13 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV64ID-LP64-NEXT: addi sp, sp, -16
; RV64ID-LP64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-LP64-NEXT: call __extendhfsf2
-; RV64ID-LP64-NEXT: lui a1, %hi(.LCPI34_0)
-; RV64ID-LP64-NEXT: flw fa5, %lo(.LCPI34_0)(a1)
+; RV64ID-LP64-NEXT: fmv.w.x fa5, a0
+; RV64ID-LP64-NEXT: fmv.w.x fa4, zero
+; RV64ID-LP64-NEXT: lui a0, 292864
+; RV64ID-LP64-NEXT: fmax.s fa5, fa5, fa4
+; RV64ID-LP64-NEXT: addi a0, a0, -256
; RV64ID-LP64-NEXT: fmv.w.x fa4, a0
-; RV64ID-LP64-NEXT: fmv.w.x fa3, zero
-; RV64ID-LP64-NEXT: fmax.s fa4, fa4, fa3
-; RV64ID-LP64-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-LP64-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-LP64-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-LP64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-LP64-NEXT: addi sp, sp, 16
@@ -7003,11 +7081,12 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV32ID-NEXT: addi sp, sp, -16
; RV32ID-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ID-NEXT: call __extendhfsf2
-; RV32ID-NEXT: lui a0, %hi(.LCPI34_0)
-; RV32ID-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; RV32ID-NEXT: fmv.w.x fa4, zero
-; RV32ID-NEXT: fmax.s fa4, fa0, fa4
-; RV32ID-NEXT: fmin.s fa5, fa4, fa5
+; RV32ID-NEXT: fmv.w.x fa5, zero
+; RV32ID-NEXT: lui a0, 292864
+; RV32ID-NEXT: fmax.s fa5, fa0, fa5
+; RV32ID-NEXT: addi a0, a0, -256
+; RV32ID-NEXT: fmv.w.x fa4, a0
+; RV32ID-NEXT: fmin.s fa5, fa5, fa4
; RV32ID-NEXT: fcvt.wu.s a0, fa5, rtz
; RV32ID-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ID-NEXT: addi sp, sp, 16
@@ -7018,11 +7097,12 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
; RV64ID-NEXT: addi sp, sp, -16
; RV64ID-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ID-NEXT: call __extendhfsf2
-; RV64ID-NEXT: lui a0, %hi(.LCPI34_0)
-; RV64ID-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; RV64ID-NEXT: fmv.w.x fa4, zero
-; RV64ID-NEXT: fmax.s fa4, fa0, fa4
-; RV64ID-NEXT: fmin.s fa5, fa4, fa5
+; RV64ID-NEXT: fmv.w.x fa5, zero
+; RV64ID-NEXT: lui a0, 292864
+; RV64ID-NEXT: fmax.s fa5, fa0, fa5
+; RV64ID-NEXT: addi a0, a0, -256
+; RV64ID-NEXT: fmv.w.x fa4, a0
+; RV64ID-NEXT: fmin.s fa5, fa5, fa4
; RV64ID-NEXT: fcvt.lu.s a0, fa5, rtz
; RV64ID-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ID-NEXT: addi sp, sp, 16
@@ -7030,23 +7110,25 @@ define zeroext i16 @fcvt_wu_s_sat_i16(half %a) nounwind {
;
; CHECK32-IZFHMIN-LABEL: fcvt_wu_s_sat_i16:
; CHECK32-IZFHMIN: # %bb.0: # %start
-; CHECK32-IZFHMIN-NEXT: lui a0, %hi(.LCPI34_0)
-; CHECK32-IZFHMIN-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa4, fa0
-; CHECK32-IZFHMIN-NEXT: fmv.w.x fa3, zero
-; CHECK32-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3
-; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5
+; CHECK32-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, zero
+; CHECK32-IZFHMIN-NEXT: lui a0, 292864
+; CHECK32-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK32-IZFHMIN-NEXT: addi a0, a0, -256
+; CHECK32-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK32-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK32-IZFHMIN-NEXT: fcvt.wu.s a0, fa5, rtz
; CHECK32-IZFHMIN-NEXT: ret
;
; CHECK64-IZFHMIN-LABEL: fcvt_wu_s_sat_i16:
; CHECK64-IZFHMIN: # %bb.0: # %start
-; CHECK64-IZFHMIN-NEXT: lui a0, %hi(.LCPI34_0)
-; CHECK64-IZFHMIN-NEXT: flw fa5, %lo(.LCPI34_0)(a0)
-; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa4, fa0
-; CHECK64-IZFHMIN-NEXT: fmv.w.x fa3, zero
-; CHECK64-IZFHMIN-NEXT: fmax.s fa4, fa4, fa3
-; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa4, fa5
+; CHECK64-IZFHMIN-NEXT: fcvt.s.h fa5, fa0
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, zero
+; CHECK64-IZFHMIN-NEXT: lui a0, 292864
+; CHECK64-IZFHMIN-NEXT: fmax.s fa5, fa5, fa4
+; CHECK64-IZFHMIN-NEXT: addi a0, a0, -256
+; CHECK64-IZFHMIN-NEXT: fmv.w.x fa4, a0
+; CHECK64-IZFHMIN-NEXT: fmin.s fa5, fa5, fa4
; CHECK64-IZFHMIN-NEXT: fcvt.lu.s a0, fa5, rtz
; CHECK64-IZFHMIN-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/half-imm.ll b/llvm/test/CodeGen/RISCV/half-imm.ll
index d68e19d15b4bb..1dc0da8c04dba 100644
--- a/llvm/test/CodeGen/RISCV/half-imm.ll
+++ b/llvm/test/CodeGen/RISCV/half-imm.ll
@@ -24,8 +24,9 @@
define half @half_imm() nounwind {
; CHECK-LABEL: half_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa0, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: lui a0, 4
+; CHECK-NEXT: addi a0, a0, 512
+; CHECK-NEXT: fmv.h.x fa0, a0
; CHECK-NEXT: ret
;
; RV32IZHINX-LABEL: half_imm:
@@ -44,8 +45,9 @@ define half @half_imm() nounwind {
;
; CHECKIZFHMIN-LABEL: half_imm:
; CHECKIZFHMIN: # %bb.0:
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECKIZFHMIN-NEXT: flh fa0, %lo(.LCPI0_0)(a0)
+; CHECKIZFHMIN-NEXT: lui a0, 4
+; CHECKIZFHMIN-NEXT: addi a0, a0, 512
+; CHECKIZFHMIN-NEXT: fmv.h.x fa0, a0
; CHECKIZFHMIN-NEXT: ret
;
; CHECKIZHINXMIN-LABEL: half_imm:
@@ -60,8 +62,9 @@ define half @half_imm() nounwind {
define half @half_imm_op(half %a) nounwind {
; CHECK-LABEL: half_imm_op:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 15
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: fadd.h fa0, fa0, fa5
; CHECK-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/half-intrinsics.ll b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
index 4f0026175e7c7..e16d788f66ede 100644
--- a/llvm/test/CodeGen/RISCV/half-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/half-intrinsics.ll
@@ -2222,8 +2222,9 @@ declare half @llvm.floor.f16(half)
define half @floor_f16(half %a) nounwind {
; CHECKIZFH-LABEL: floor_f16:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB18_2
@@ -2313,8 +2314,9 @@ declare half @llvm.ceil.f16(half)
define half @ceil_f16(half %a) nounwind {
; CHECKIZFH-LABEL: ceil_f16:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI19_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB19_2
@@ -2404,8 +2406,9 @@ declare half @llvm.trunc.f16(half)
define half @trunc_f16(half %a) nounwind {
; CHECKIZFH-LABEL: trunc_f16:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB20_2
@@ -2495,8 +2498,9 @@ declare half @llvm.rint.f16(half)
define half @rint_f16(half %a) nounwind {
; CHECKIZFH-LABEL: rint_f16:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI21_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI21_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB21_2
@@ -2706,8 +2710,9 @@ declare half @llvm.round.f16(half)
define half @round_f16(half %a) nounwind {
; CHECKIZFH-LABEL: round_f16:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI23_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI23_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB23_2
@@ -2797,8 +2802,9 @@ declare half @llvm.roundeven.f16(half)
define half @roundeven_f16(half %a) nounwind {
; CHECKIZFH-LABEL: roundeven_f16:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI24_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB24_2
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
index 3b645bf8aef91..c815bc19e280c 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv-sat.ll
@@ -95,8 +95,9 @@ define signext i32 @test_floor_si32(half %x) {
define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-LABEL: test_floor_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI1_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB1_2
@@ -121,8 +122,9 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB1_4:
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI1_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI1_1)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB1_6
; RV32IZFH-NEXT: # %bb.5:
@@ -248,8 +250,9 @@ define i64 @test_floor_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: # %bb.3:
; RV32IZFHMIN-NEXT: mv a2, a1
; RV32IZFHMIN-NEXT: .LBB1_4:
-; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI1_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI1_0)(a1)
+; RV32IZFHMIN-NEXT: lui a1, 389120
+; RV32IZFHMIN-NEXT: addi a1, a1, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFHMIN-NEXT: beqz a1, .LBB1_6
; RV32IZFHMIN-NEXT: # %bb.5:
@@ -506,8 +509,9 @@ define signext i32 @test_floor_ui32(half %x) {
define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZFH-LABEL: test_floor_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB3_2
@@ -526,10 +530,11 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: lui a2, %hi(.LCPI3_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI3_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
; RV32IZFH-NEXT: flt.s a2, fa5, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: or a0, a2, a0
@@ -627,10 +632,11 @@ define i64 @test_floor_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI3_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI3_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: lui a2, 391168
; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: addi a2, a2, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2
; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: or a0, a2, a0
@@ -803,8 +809,9 @@ define signext i32 @test_ceil_si32(half %x) {
define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-LABEL: test_ceil_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI5_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB5_2
@@ -829,8 +836,9 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB5_4:
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI5_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI5_1)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB5_6
; RV32IZFH-NEXT: # %bb.5:
@@ -956,8 +964,9 @@ define i64 @test_ceil_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: # %bb.3:
; RV32IZFHMIN-NEXT: mv a2, a1
; RV32IZFHMIN-NEXT: .LBB5_4:
-; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI5_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI5_0)(a1)
+; RV32IZFHMIN-NEXT: lui a1, 389120
+; RV32IZFHMIN-NEXT: addi a1, a1, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFHMIN-NEXT: beqz a1, .LBB5_6
; RV32IZFHMIN-NEXT: # %bb.5:
@@ -1214,8 +1223,9 @@ define signext i32 @test_ceil_ui32(half %x) {
define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZFH-LABEL: test_ceil_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI7_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB7_2
@@ -1234,10 +1244,11 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: lui a2, %hi(.LCPI7_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI7_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
; RV32IZFH-NEXT: flt.s a2, fa5, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: or a0, a2, a0
@@ -1335,10 +1346,11 @@ define i64 @test_ceil_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI7_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI7_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: lui a2, 391168
; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: addi a2, a2, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2
; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: or a0, a2, a0
@@ -1511,8 +1523,9 @@ define signext i32 @test_trunc_si32(half %x) {
define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-LABEL: test_trunc_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI9_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB9_2
@@ -1537,8 +1550,9 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB9_4:
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI9_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI9_1)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB9_6
; RV32IZFH-NEXT: # %bb.5:
@@ -1664,8 +1678,9 @@ define i64 @test_trunc_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: # %bb.3:
; RV32IZFHMIN-NEXT: mv a2, a1
; RV32IZFHMIN-NEXT: .LBB9_4:
-; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI9_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI9_0)(a1)
+; RV32IZFHMIN-NEXT: lui a1, 389120
+; RV32IZFHMIN-NEXT: addi a1, a1, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFHMIN-NEXT: beqz a1, .LBB9_6
; RV32IZFHMIN-NEXT: # %bb.5:
@@ -1922,8 +1937,9 @@ define signext i32 @test_trunc_ui32(half %x) {
define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZFH-LABEL: test_trunc_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI11_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB11_2
@@ -1942,10 +1958,11 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: lui a2, %hi(.LCPI11_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI11_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
; RV32IZFH-NEXT: flt.s a2, fa5, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: or a0, a2, a0
@@ -2043,10 +2060,11 @@ define i64 @test_trunc_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI11_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI11_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: lui a2, 391168
; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: addi a2, a2, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2
; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: or a0, a2, a0
@@ -2219,8 +2237,9 @@ define signext i32 @test_round_si32(half %x) {
define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-LABEL: test_round_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI13_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI13_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB13_2
@@ -2245,8 +2264,9 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB13_4:
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI13_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI13_1)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB13_6
; RV32IZFH-NEXT: # %bb.5:
@@ -2372,8 +2392,9 @@ define i64 @test_round_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: # %bb.3:
; RV32IZFHMIN-NEXT: mv a2, a1
; RV32IZFHMIN-NEXT: .LBB13_4:
-; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI13_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI13_0)(a1)
+; RV32IZFHMIN-NEXT: lui a1, 389120
+; RV32IZFHMIN-NEXT: addi a1, a1, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFHMIN-NEXT: beqz a1, .LBB13_6
; RV32IZFHMIN-NEXT: # %bb.5:
@@ -2630,8 +2651,9 @@ define signext i32 @test_round_ui32(half %x) {
define i64 @test_round_ui64(half %x) nounwind {
; RV32IZFH-LABEL: test_round_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI15_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI15_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB15_2
@@ -2650,10 +2672,11 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: lui a2, %hi(.LCPI15_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI15_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
; RV32IZFH-NEXT: flt.s a2, fa5, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: or a0, a2, a0
@@ -2751,10 +2774,11 @@ define i64 @test_round_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI15_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI15_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: lui a2, 391168
; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: addi a2, a2, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2
; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: or a0, a2, a0
@@ -2927,8 +2951,9 @@ define signext i32 @test_roundeven_si32(half %x) {
define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-LABEL: test_roundeven_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI17_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI17_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB17_2
@@ -2953,8 +2978,9 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB17_4:
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI17_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI17_1)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB17_6
; RV32IZFH-NEXT: # %bb.5:
@@ -3080,8 +3106,9 @@ define i64 @test_roundeven_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: # %bb.3:
; RV32IZFHMIN-NEXT: mv a2, a1
; RV32IZFHMIN-NEXT: .LBB17_4:
-; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI17_0)(a1)
+; RV32IZFHMIN-NEXT: lui a1, 389120
+; RV32IZFHMIN-NEXT: addi a1, a1, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFHMIN-NEXT: beqz a1, .LBB17_6
; RV32IZFHMIN-NEXT: # %bb.5:
@@ -3338,8 +3365,9 @@ define signext i32 @test_roundeven_ui32(half %x) {
define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZFH-LABEL: test_roundeven_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI19_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI19_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB19_2
@@ -3358,10 +3386,11 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: lui a2, %hi(.LCPI19_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI19_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
; RV32IZFH-NEXT: flt.s a2, fa5, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: or a0, a2, a0
@@ -3459,10 +3488,11 @@ define i64 @test_roundeven_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI19_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI19_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: lui a2, 391168
; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: addi a2, a2, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2
; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: or a0, a2, a0
@@ -3635,8 +3665,9 @@ define signext i32 @test_rint_si32(half %x) {
define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-LABEL: test_rint_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI21_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI21_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB21_2
@@ -3661,8 +3692,9 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFH-NEXT: # %bb.3:
; RV32IZFH-NEXT: mv a2, a1
; RV32IZFH-NEXT: .LBB21_4:
-; RV32IZFH-NEXT: lui a1, %hi(.LCPI21_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI21_1)(a1)
+; RV32IZFH-NEXT: lui a1, 389120
+; RV32IZFH-NEXT: addi a1, a1, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a1
; RV32IZFH-NEXT: flt.s a1, fa5, fs0
; RV32IZFH-NEXT: beqz a1, .LBB21_6
; RV32IZFH-NEXT: # %bb.5:
@@ -3788,8 +3820,9 @@ define i64 @test_rint_si64(half %x) nounwind {
; RV32IZFHMIN-NEXT: # %bb.3:
; RV32IZFHMIN-NEXT: mv a2, a1
; RV32IZFHMIN-NEXT: .LBB21_4:
-; RV32IZFHMIN-NEXT: lui a1, %hi(.LCPI21_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI21_0)(a1)
+; RV32IZFHMIN-NEXT: lui a1, 389120
+; RV32IZFHMIN-NEXT: addi a1, a1, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a1
; RV32IZFHMIN-NEXT: flt.s a1, fa5, fs0
; RV32IZFHMIN-NEXT: beqz a1, .LBB21_6
; RV32IZFHMIN-NEXT: # %bb.5:
@@ -4046,8 +4079,9 @@ define signext i32 @test_rint_ui32(half %x) {
define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZFH-LABEL: test_rint_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI23_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI23_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB23_2
@@ -4066,10 +4100,11 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZFH-NEXT: neg s0, a0
; RV32IZFH-NEXT: fmv.s fa0, fs0
; RV32IZFH-NEXT: call __fixunssfdi
-; RV32IZFH-NEXT: lui a2, %hi(.LCPI23_1)
-; RV32IZFH-NEXT: flw fa5, %lo(.LCPI23_1)(a2)
; RV32IZFH-NEXT: and a0, s0, a0
+; RV32IZFH-NEXT: lui a2, 391168
; RV32IZFH-NEXT: and a1, s0, a1
+; RV32IZFH-NEXT: addi a2, a2, -1
+; RV32IZFH-NEXT: fmv.w.x fa5, a2
; RV32IZFH-NEXT: flt.s a2, fa5, fs0
; RV32IZFH-NEXT: neg a2, a2
; RV32IZFH-NEXT: or a0, a2, a0
@@ -4167,10 +4202,11 @@ define i64 @test_rint_ui64(half %x) nounwind {
; RV32IZFHMIN-NEXT: neg s0, a0
; RV32IZFHMIN-NEXT: fmv.s fa0, fs0
; RV32IZFHMIN-NEXT: call __fixunssfdi
-; RV32IZFHMIN-NEXT: lui a2, %hi(.LCPI23_0)
-; RV32IZFHMIN-NEXT: flw fa5, %lo(.LCPI23_0)(a2)
; RV32IZFHMIN-NEXT: and a0, s0, a0
+; RV32IZFHMIN-NEXT: lui a2, 391168
; RV32IZFHMIN-NEXT: and a1, s0, a1
+; RV32IZFHMIN-NEXT: addi a2, a2, -1
+; RV32IZFHMIN-NEXT: fmv.w.x fa5, a2
; RV32IZFHMIN-NEXT: flt.s a2, fa5, fs0
; RV32IZFHMIN-NEXT: neg a2, a2
; RV32IZFHMIN-NEXT: or a0, a2, a0
diff --git a/llvm/test/CodeGen/RISCV/half-round-conv.ll b/llvm/test/CodeGen/RISCV/half-round-conv.ll
index 8a787ee578990..cfc997d66ec56 100644
--- a/llvm/test/CodeGen/RISCV/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/half-round-conv.ll
@@ -309,8 +309,9 @@ define signext i32 @test_floor_si32(half %x) {
define i64 @test_floor_si64(half %x) {
; RV32IZFH-LABEL: test_floor_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI3_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB3_2
@@ -754,8 +755,9 @@ define signext i32 @test_floor_ui32(half %x) {
define i64 @test_floor_ui64(half %x) {
; RV32IZFH-LABEL: test_floor_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI7_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB7_2
@@ -1199,8 +1201,9 @@ define signext i32 @test_ceil_si32(half %x) {
define i64 @test_ceil_si64(half %x) {
; RV32IZFH-LABEL: test_ceil_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI11_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB11_2
@@ -1644,8 +1647,9 @@ define signext i32 @test_ceil_ui32(half %x) {
define i64 @test_ceil_ui64(half %x) {
; RV32IZFH-LABEL: test_ceil_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI15_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI15_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB15_2
@@ -2089,8 +2093,9 @@ define signext i32 @test_trunc_si32(half %x) {
define i64 @test_trunc_si64(half %x) {
; RV32IZFH-LABEL: test_trunc_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI19_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI19_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB19_2
@@ -2534,8 +2539,9 @@ define signext i32 @test_trunc_ui32(half %x) {
define i64 @test_trunc_ui64(half %x) {
; RV32IZFH-LABEL: test_trunc_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI23_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI23_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB23_2
@@ -2979,8 +2985,9 @@ define signext i32 @test_round_si32(half %x) {
define i64 @test_round_si64(half %x) {
; RV32IZFH-LABEL: test_round_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI27_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI27_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB27_2
@@ -3424,8 +3431,9 @@ define signext i32 @test_round_ui32(half %x) {
define i64 @test_round_ui64(half %x) {
; RV32IZFH-LABEL: test_round_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI31_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI31_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB31_2
@@ -3869,8 +3877,9 @@ define signext i32 @test_roundeven_si32(half %x) {
define i64 @test_roundeven_si64(half %x) {
; RV32IZFH-LABEL: test_roundeven_si64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI35_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI35_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB35_2
@@ -4314,8 +4323,9 @@ define signext i32 @test_roundeven_ui32(half %x) {
define i64 @test_roundeven_ui64(half %x) {
; RV32IZFH-LABEL: test_roundeven_ui64:
; RV32IZFH: # %bb.0:
-; RV32IZFH-NEXT: lui a0, %hi(.LCPI39_0)
-; RV32IZFH-NEXT: flh fa5, %lo(.LCPI39_0)(a0)
+; RV32IZFH-NEXT: li a0, 25
+; RV32IZFH-NEXT: slli a0, a0, 10
+; RV32IZFH-NEXT: fmv.h.x fa5, a0
; RV32IZFH-NEXT: fabs.h fa4, fa0
; RV32IZFH-NEXT: flt.h a0, fa4, fa5
; RV32IZFH-NEXT: beqz a0, .LBB39_2
@@ -4490,8 +4500,9 @@ define half @test_floor_half(half %x) {
; RV64IFD-NEXT: ret
; CHECKIZFH-LABEL: test_floor_half:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI40_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB40_2
@@ -4574,8 +4585,9 @@ define half @test_ceil_half(half %x) {
; RV64IFD-NEXT: ret
; CHECKIZFH-LABEL: test_ceil_half:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI41_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI41_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB41_2
@@ -4658,8 +4670,9 @@ define half @test_trunc_half(half %x) {
; RV64IFD-NEXT: ret
; CHECKIZFH-LABEL: test_trunc_half:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI42_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB42_2
@@ -4742,8 +4755,9 @@ define half @test_round_half(half %x) {
; RV64IFD-NEXT: ret
; CHECKIZFH-LABEL: test_round_half:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI43_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI43_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB43_2
@@ -4826,8 +4840,9 @@ define half @test_roundeven_half(half %x) {
; RV64IFD-NEXT: ret
; CHECKIZFH-LABEL: test_roundeven_half:
; CHECKIZFH: # %bb.0:
-; CHECKIZFH-NEXT: lui a0, %hi(.LCPI44_0)
-; CHECKIZFH-NEXT: flh fa5, %lo(.LCPI44_0)(a0)
+; CHECKIZFH-NEXT: li a0, 25
+; CHECKIZFH-NEXT: slli a0, a0, 10
+; CHECKIZFH-NEXT: fmv.h.x fa5, a0
; CHECKIZFH-NEXT: fabs.h fa4, fa0
; CHECKIZFH-NEXT: flt.h a0, fa4, fa5
; CHECKIZFH-NEXT: beqz a0, .LBB44_2
diff --git a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
index bf535b1cbd084..e9699502ed3a9 100644
--- a/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-select-fcmp.ll
@@ -878,8 +878,9 @@ define signext i32 @select_fcmp_uge_1_2(half %a, half %b) nounwind {
define half @CascadedSelect(half noundef %a) {
; CHECK-LABEL: CascadedSelect:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
+; CHECK-NEXT: li a0, 15
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: flt.h a0, fa5, fa0
; CHECK-NEXT: bnez a0, .LBB20_3
; CHECK-NEXT: # %bb.1: # %entry
@@ -910,23 +911,24 @@ define half @CascadedSelect(half noundef %a) {
;
; CHECKIZFHMIN-LABEL: CascadedSelect:
; CHECKIZFHMIN: # %bb.0: # %entry
-; CHECKIZFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECKIZFHMIN-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
-; CHECKIZFHMIN-NEXT: fcvt.s.h fa3, fa5
-; CHECKIZFHMIN-NEXT: fcvt.s.h fa4, fa0
-; CHECKIZFHMIN-NEXT: flt.s a0, fa3, fa4
-; CHECKIZFHMIN-NEXT: bnez a0, .LBB20_3
-; CHECKIZFHMIN-NEXT: # %bb.1: # %entry
-; CHECKIZFHMIN-NEXT: fmv.w.x fa5, zero
+; CHECKIZFHMIN-NEXT: fcvt.s.h fa5, fa0
+; CHECKIZFHMIN-NEXT: lui a0, 260096
+; CHECKIZFHMIN-NEXT: fmv.w.x fa4, zero
+; CHECKIZFHMIN-NEXT: flt.s a1, fa5, fa4
+; CHECKIZFHMIN-NEXT: fmv.w.x fa4, a0
; CHECKIZFHMIN-NEXT: flt.s a0, fa4, fa5
+; CHECKIZFHMIN-NEXT: bnez a1, .LBB20_3
+; CHECKIZFHMIN-NEXT: # %bb.1: # %entry
; CHECKIZFHMIN-NEXT: bnez a0, .LBB20_4
-; CHECKIZFHMIN-NEXT: # %bb.2: # %entry
-; CHECKIZFHMIN-NEXT: fmv.s fa5, fa0
-; CHECKIZFHMIN-NEXT: .LBB20_3: # %entry
-; CHECKIZFHMIN-NEXT: fmv.s fa0, fa5
+; CHECKIZFHMIN-NEXT: .LBB20_2: # %entry
; CHECKIZFHMIN-NEXT: ret
-; CHECKIZFHMIN-NEXT: .LBB20_4:
+; CHECKIZFHMIN-NEXT: .LBB20_3:
; CHECKIZFHMIN-NEXT: fmv.h.x fa0, zero
+; CHECKIZFHMIN-NEXT: beqz a0, .LBB20_2
+; CHECKIZFHMIN-NEXT: .LBB20_4:
+; CHECKIZFHMIN-NEXT: li a0, 15
+; CHECKIZFHMIN-NEXT: slli a0, a0, 10
+; CHECKIZFHMIN-NEXT: fmv.h.x fa0, a0
; CHECKIZFHMIN-NEXT: ret
;
; CHECKIZHINXMIN-LABEL: CascadedSelect:
diff --git a/llvm/test/CodeGen/RISCV/half-zfa-fli.ll b/llvm/test/CodeGen/RISCV/half-zfa-fli.ll
index 281a873235623..928535d79f02c 100644
--- a/llvm/test/CodeGen/RISCV/half-zfa-fli.ll
+++ b/llvm/test/CodeGen/RISCV/half-zfa-fli.ll
@@ -16,8 +16,9 @@ define half @loadfpimm1() {
;
; ZFHMIN-LABEL: loadfpimm1:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI0_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI0_0)(a0)
+; ZFHMIN-NEXT: li a0, 11
+; ZFHMIN-NEXT: slli a0, a0, 10
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 0.0625
}
@@ -30,8 +31,9 @@ define half @loadfpimm2() {
;
; ZFHMIN-LABEL: loadfpimm2:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI1_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI1_0)(a0)
+; ZFHMIN-NEXT: li a0, 29
+; ZFHMIN-NEXT: slli a0, a0, 9
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 0.75
}
@@ -44,8 +46,9 @@ define half @loadfpimm3() {
;
; ZFHMIN-LABEL: loadfpimm3:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI2_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI2_0)(a0)
+; ZFHMIN-NEXT: lui a0, 4
+; ZFHMIN-NEXT: addi a0, a0, -768
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 1.25
}
@@ -58,8 +61,9 @@ define half @loadfpimm4() {
;
; ZFHMIN-LABEL: loadfpimm4:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI3_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI3_0)(a0)
+; ZFHMIN-NEXT: lui a0, 4
+; ZFHMIN-NEXT: addi a0, a0, 512
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 3.0
}
@@ -72,8 +76,9 @@ define half @loadfpimm5() {
;
; ZFHMIN-LABEL: loadfpimm5:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI4_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI4_0)(a0)
+; ZFHMIN-NEXT: li a0, 23
+; ZFHMIN-NEXT: slli a0, a0, 10
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 256.0
}
@@ -86,8 +91,9 @@ define half @loadfpimm6() {
;
; ZFHMIN-LABEL: loadfpimm6:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI5_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI5_0)(a0)
+; ZFHMIN-NEXT: li a0, 31
+; ZFHMIN-NEXT: slli a0, a0, 10
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 0xH7C00
}
@@ -100,8 +106,9 @@ define half @loadfpimm7() {
;
; ZFHMIN-LABEL: loadfpimm7:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI6_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI6_0)(a0)
+; ZFHMIN-NEXT: lui a0, 8
+; ZFHMIN-NEXT: addi a0, a0, -512
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 0xH7E00
}
@@ -123,14 +130,16 @@ define half @loadfpimm8() {
define half @loadfpimm9() {
; CHECK-LABEL: loadfpimm9:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
-; CHECK-NEXT: flh fa0, %lo(.LCPI8_0)(a0)
+; CHECK-NEXT: lui a0, 6
+; CHECK-NEXT: addi a0, a0, -1032
+; CHECK-NEXT: fmv.h.x fa0, a0
; CHECK-NEXT: ret
;
; ZFHMIN-LABEL: loadfpimm9:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI8_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI8_0)(a0)
+; ZFHMIN-NEXT: lui a0, 6
+; ZFHMIN-NEXT: addi a0, a0, -1032
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 255.0
}
@@ -169,14 +178,16 @@ define half @loadfpimm11() {
define half @loadfpimm12() {
; CHECK-LABEL: loadfpimm12:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: flh fa0, %lo(.LCPI11_0)(a0)
+; CHECK-NEXT: lui a0, 8
+; CHECK-NEXT: addi a0, a0, -1023
+; CHECK-NEXT: fmv.h.x fa0, a0
; CHECK-NEXT: ret
;
; ZFHMIN-LABEL: loadfpimm12:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI11_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI11_0)(a0)
+; ZFHMIN-NEXT: lui a0, 8
+; ZFHMIN-NEXT: addi a0, a0, -1023
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 0xH7c01
}
@@ -189,8 +200,9 @@ define half @loadfpimm13() {
;
; ZFHMIN-LABEL: loadfpimm13:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI12_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI12_0)(a0)
+; ZFHMIN-NEXT: li a0, -17
+; ZFHMIN-NEXT: slli a0, a0, 10
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half -1.0
}
@@ -222,8 +234,9 @@ define half @loadfpimm15() {
;
; ZFHMIN-LABEL: loadfpimm15:
; ZFHMIN: # %bb.0:
-; ZFHMIN-NEXT: lui a0, %hi(.LCPI14_0)
-; ZFHMIN-NEXT: flh fa0, %lo(.LCPI14_0)(a0)
+; ZFHMIN-NEXT: li a0, -31
+; ZFHMIN-NEXT: slli a0, a0, 10
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
ret half 0xH8400
}
diff --git a/llvm/test/CodeGen/RISCV/half-zfa.ll b/llvm/test/CodeGen/RISCV/half-zfa.ll
index 960c7c4a73e4f..90c66e7fe2ca4 100644
--- a/llvm/test/CodeGen/RISCV/half-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/half-zfa.ll
@@ -350,12 +350,15 @@ define half @select_loadfpimm(half %x) nounwind {
; ZFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZFHMIN-NEXT: fmv.w.x fa4, zero
; ZFHMIN-NEXT: fle.s a0, fa4, fa5
-; ZFHMIN-NEXT: xori a0, a0, 1
-; ZFHMIN-NEXT: slli a0, a0, 1
-; ZFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
-; ZFHMIN-NEXT: addi a1, a1, %lo(.LCPI17_0)
-; ZFHMIN-NEXT: add a0, a1, a0
-; ZFHMIN-NEXT: flh fa0, 0(a0)
+; ZFHMIN-NEXT: beqz a0, .LBB17_2
+; ZFHMIN-NEXT: # %bb.1: # %entry
+; ZFHMIN-NEXT: li a0, 7
+; ZFHMIN-NEXT: j .LBB17_3
+; ZFHMIN-NEXT: .LBB17_2:
+; ZFHMIN-NEXT: li a0, -9
+; ZFHMIN-NEXT: .LBB17_3: # %entry
+; ZFHMIN-NEXT: slli a0, a0, 11
+; ZFHMIN-NEXT: fmv.h.x fa0, a0
; ZFHMIN-NEXT: ret
entry:
%cmp = fcmp ult half %x, 0.000000e+00
diff --git a/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll b/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
index f183c936fc672..f3b4319ccc4fa 100644
--- a/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
+++ b/llvm/test/CodeGen/RISCV/repeated-fp-divisors.ll
@@ -17,8 +17,9 @@ entry:
define void @two_fdivs(double %a0, double %a1, double %a2, ptr %res) {
; CHECK-LABEL: two_fdivs:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI1_0)(a1)
+; CHECK-NEXT: li a1, 1023
+; CHECK-NEXT: slli a1, a1, 52
+; CHECK-NEXT: fmv.d.x fa5, a1
; CHECK-NEXT: fdiv.d fa5, fa5, fa0
; CHECK-NEXT: fmul.d fa4, fa1, fa5
; CHECK-NEXT: fmul.d fa5, fa2, fa5
diff --git a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
index caa6c2f8ff96f..a919452389c43 100644
--- a/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-double-convert.ll
@@ -122,9 +122,10 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind {
; RV64ID-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64ID-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64ID-NEXT: fsd fs0, 8(sp) # 8-byte Folded Spill
-; RV64ID-NEXT: lui a0, %hi(.LCPI4_0)
-; RV64ID-NEXT: fld fa5, %lo(.LCPI4_0)(a0)
; RV64ID-NEXT: fmv.d fs0, fa0
+; RV64ID-NEXT: li a0, -449
+; RV64ID-NEXT: slli a0, a0, 53
+; RV64ID-NEXT: fmv.d.x fa5, a0
; RV64ID-NEXT: fle.d s0, fa5, fa0
; RV64ID-NEXT: call __fixdfti
; RV64ID-NEXT: li a2, -1
@@ -132,8 +133,8 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind {
; RV64ID-NEXT: # %bb.1:
; RV64ID-NEXT: slli a1, a2, 63
; RV64ID-NEXT: .LBB4_2:
-; RV64ID-NEXT: lui a3, %hi(.LCPI4_1)
-; RV64ID-NEXT: fld fa5, %lo(.LCPI4_1)(a3)
+; RV64ID-NEXT: lui a3, %hi(.LCPI4_0)
+; RV64ID-NEXT: fld fa5, %lo(.LCPI4_0)(a3)
; RV64ID-NEXT: flt.d a3, fa5, fs0
; RV64ID-NEXT: beqz a3, .LBB4_4
; RV64ID-NEXT: # %bb.3:
@@ -170,16 +171,17 @@ define i128 @fptosi_sat_f64_to_i128(double %a) nounwind {
; RV64IDINX-NEXT: # %bb.1:
; RV64IDINX-NEXT: slli a1, a2, 63
; RV64IDINX-NEXT: .LBB4_2:
-; RV64IDINX-NEXT: lui a3, %hi(.LCPI4_0)
-; RV64IDINX-NEXT: ld a3, %lo(.LCPI4_0)(a3)
+; RV64IDINX-NEXT: li a3, 575
+; RV64IDINX-NEXT: slli a3, a3, 53
+; RV64IDINX-NEXT: addi a3, a3, -1
; RV64IDINX-NEXT: flt.d a3, a3, s0
; RV64IDINX-NEXT: beqz a3, .LBB4_4
; RV64IDINX-NEXT: # %bb.3:
; RV64IDINX-NEXT: srli a1, a2, 1
; RV64IDINX-NEXT: .LBB4_4:
; RV64IDINX-NEXT: feq.d a2, s0, s0
-; RV64IDINX-NEXT: neg a3, a3
; RV64IDINX-NEXT: neg a4, s1
+; RV64IDINX-NEXT: neg a3, a3
; RV64IDINX-NEXT: neg a2, a2
; RV64IDINX-NEXT: and a0, a4, a0
; RV64IDINX-NEXT: and a1, a2, a1
@@ -267,10 +269,11 @@ define i128 @fptoui_sat_f64_to_i128(double %a) nounwind {
; RV64IDINX-NEXT: neg s1, a0
; RV64IDINX-NEXT: mv a0, s0
; RV64IDINX-NEXT: call __fixunsdfti
-; RV64IDINX-NEXT: lui a2, %hi(.LCPI5_0)
-; RV64IDINX-NEXT: ld a2, %lo(.LCPI5_0)(a2)
; RV64IDINX-NEXT: and a0, s1, a0
+; RV64IDINX-NEXT: li a2, 1151
; RV64IDINX-NEXT: and a1, s1, a1
+; RV64IDINX-NEXT: slli a2, a2, 52
+; RV64IDINX-NEXT: addi a2, a2, -1
; RV64IDINX-NEXT: flt.d a2, a2, s0
; RV64IDINX-NEXT: neg a2, a2
; RV64IDINX-NEXT: or a0, a2, a0
diff --git a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
index ebda78528810f..0af75a789f7a2 100644
--- a/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-float-convert.ll
@@ -130,16 +130,17 @@ define i128 @fptosi_sat_f32_to_i128(float %a) nounwind {
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: slli a1, a2, 63
; RV64IF-NEXT: .LBB4_2:
-; RV64IF-NEXT: lui a3, %hi(.LCPI4_0)
-; RV64IF-NEXT: flw fa5, %lo(.LCPI4_0)(a3)
+; RV64IF-NEXT: lui a3, 520192
+; RV64IF-NEXT: addi a3, a3, -1
+; RV64IF-NEXT: fmv.w.x fa5, a3
; RV64IF-NEXT: flt.s a3, fa5, fs0
; RV64IF-NEXT: beqz a3, .LBB4_4
; RV64IF-NEXT: # %bb.3:
; RV64IF-NEXT: srli a1, a2, 1
; RV64IF-NEXT: .LBB4_4:
; RV64IF-NEXT: feq.s a2, fs0, fs0
-; RV64IF-NEXT: neg a3, a3
; RV64IF-NEXT: neg a4, s0
+; RV64IF-NEXT: neg a3, a3
; RV64IF-NEXT: neg a2, a2
; RV64IF-NEXT: and a0, a4, a0
; RV64IF-NEXT: and a1, a2, a1
@@ -235,10 +236,11 @@ define i128 @fptoui_sat_f32_to_i128(float %a) nounwind {
; RV64IF-NEXT: fle.s a0, fa5, fa0
; RV64IF-NEXT: neg s0, a0
; RV64IF-NEXT: call __fixunssfti
-; RV64IF-NEXT: lui a2, %hi(.LCPI5_0)
-; RV64IF-NEXT: flw fa5, %lo(.LCPI5_0)(a2)
; RV64IF-NEXT: and a0, s0, a0
+; RV64IF-NEXT: lui a2, 522240
; RV64IF-NEXT: and a1, s0, a1
+; RV64IF-NEXT: addi a2, a2, -1
+; RV64IF-NEXT: fmv.w.x fa5, a2
; RV64IF-NEXT: flt.s a2, fa5, fs0
; RV64IF-NEXT: neg a2, a2
; RV64IF-NEXT: or a0, a2, a0
diff --git a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
index 648f3789953aa..d8f3816b85485 100644
--- a/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
+++ b/llvm/test/CodeGen/RISCV/rv64-half-convert.ll
@@ -208,16 +208,17 @@ define i128 @fptosi_sat_f16_to_i128(half %a) nounwind {
; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: slli a1, a2, 63
; RV64IZFH-NEXT: .LBB4_2:
-; RV64IZFH-NEXT: lui a3, %hi(.LCPI4_0)
-; RV64IZFH-NEXT: flw fa5, %lo(.LCPI4_0)(a3)
+; RV64IZFH-NEXT: lui a3, 520192
+; RV64IZFH-NEXT: addi a3, a3, -1
+; RV64IZFH-NEXT: fmv.w.x fa5, a3
; RV64IZFH-NEXT: flt.s a3, fa5, fs0
; RV64IZFH-NEXT: beqz a3, .LBB4_4
; RV64IZFH-NEXT: # %bb.3:
; RV64IZFH-NEXT: srli a1, a2, 1
; RV64IZFH-NEXT: .LBB4_4:
; RV64IZFH-NEXT: feq.s a2, fs0, fs0
-; RV64IZFH-NEXT: neg a3, a3
; RV64IZFH-NEXT: neg a4, s0
+; RV64IZFH-NEXT: neg a3, a3
; RV64IZFH-NEXT: neg a2, a2
; RV64IZFH-NEXT: and a0, a4, a0
; RV64IZFH-NEXT: and a1, a2, a1
@@ -308,23 +309,25 @@ define i128 @fptoui_sat_f16_to_i128(half %a) nounwind {
; RV64IZFH-NEXT: addi sp, sp, -32
; RV64IZFH-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
-; RV64IZFH-NEXT: lui a0, %hi(.LCPI5_0)
-; RV64IZFH-NEXT: flw fa5, %lo(.LCPI5_0)(a0)
-; RV64IZFH-NEXT: fcvt.s.h fa0, fa0
-; RV64IZFH-NEXT: fmv.w.x fa4, zero
-; RV64IZFH-NEXT: fle.s a0, fa4, fa0
-; RV64IZFH-NEXT: flt.s a1, fa5, fa0
-; RV64IZFH-NEXT: neg s0, a1
-; RV64IZFH-NEXT: neg s1, a0
+; RV64IZFH-NEXT: fsw fs0, 12(sp) # 4-byte Folded Spill
+; RV64IZFH-NEXT: fcvt.s.h fs0, fa0
+; RV64IZFH-NEXT: fmv.w.x fa5, zero
+; RV64IZFH-NEXT: fle.s a0, fa5, fs0
+; RV64IZFH-NEXT: neg s0, a0
+; RV64IZFH-NEXT: fmv.s fa0, fs0
; RV64IZFH-NEXT: call __fixunssfti
-; RV64IZFH-NEXT: and a0, s1, a0
-; RV64IZFH-NEXT: and a1, s1, a1
-; RV64IZFH-NEXT: or a0, s0, a0
-; RV64IZFH-NEXT: or a1, s0, a1
+; RV64IZFH-NEXT: and a0, s0, a0
+; RV64IZFH-NEXT: lui a2, 522240
+; RV64IZFH-NEXT: and a1, s0, a1
+; RV64IZFH-NEXT: addi a2, a2, -1
+; RV64IZFH-NEXT: fmv.w.x fa5, a2
+; RV64IZFH-NEXT: flt.s a2, fa5, fs0
+; RV64IZFH-NEXT: neg a2, a2
+; RV64IZFH-NEXT: or a0, a2, a0
+; RV64IZFH-NEXT: or a1, a2, a1
; RV64IZFH-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
-; RV64IZFH-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: flw fs0, 12(sp) # 4-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 32
; RV64IZFH-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index 2fe8c8ce7975a..6507349f45a2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -1,16 +1,16 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFH
declare <vscale x 1 x bfloat> @llvm.vp.ceil.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -407,10 +407,11 @@ declare <vscale x 1 x half> @llvm.vp.ceil.nxv1f16(<vscale x 1 x half>, <vscale x
define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -453,10 +454,11 @@ define <vscale x 1 x half> @vp_ceil_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
define <vscale x 1 x half> @vp_ceil_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -493,10 +495,11 @@ declare <vscale x 2 x half> @llvm.vp.ceil.nxv2f16(<vscale x 2 x half>, <vscale x
define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -539,10 +542,11 @@ define <vscale x 2 x half> @vp_ceil_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
define <vscale x 2 x half> @vp_ceil_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -579,10 +583,11 @@ declare <vscale x 4 x half> @llvm.vp.ceil.nxv4f16(<vscale x 4 x half>, <vscale x
define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -625,10 +630,11 @@ define <vscale x 4 x half> @vp_ceil_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
define <vscale x 4 x half> @vp_ceil_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -667,9 +673,10 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -713,10 +720,11 @@ define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -755,9 +763,10 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -801,10 +810,11 @@ define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -843,9 +853,10 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -922,10 +933,11 @@ define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_vv_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -1210,41 +1222,75 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32_unmasked(<vscale x 16 x float>
declare <vscale x 1 x double> @llvm.vp.ceil.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_ceil_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv1f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.ceil.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv1f64_unmasked:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZFH-NEXT: vfabs.v v9, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv1f64_unmasked:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZFH-NEXT: vfabs.v v9, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.ceil.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1252,43 +1298,79 @@ define <vscale x 1 x double> @vp_ceil_vv_nxv1f64_unmasked(<vscale x 1 x double>
declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZFH-NEXT: vmv1r.v v10, v0
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vmv1r.v v0, v10
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv2f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZFH-NEXT: vmv1r.v v10, v0
+; RV64ZFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vmv1r.v v0, v10
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv2f64_unmasked:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZFH-NEXT: vfabs.v v10, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv2f64_unmasked:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZFH-NEXT: vfabs.v v10, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1296,43 +1378,79 @@ define <vscale x 2 x double> @vp_ceil_vv_nxv2f64_unmasked(<vscale x 2 x double>
declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZFH-NEXT: vmv1r.v v12, v0
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vmv1r.v v0, v12
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv4f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZFH-NEXT: vmv1r.v v12, v0
+; RV64ZFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vmv1r.v v0, v12
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv4f64_unmasked:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZFH-NEXT: vfabs.v v12, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv4f64_unmasked:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZFH-NEXT: vfabs.v v12, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1340,43 +1458,79 @@ define <vscale x 4 x double> @vp_ceil_vv_nxv4f64_unmasked(<vscale x 4 x double>
declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT: vmv1r.v v16, v0
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vmv1r.v v0, v16
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv7f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT: vmv1r.v v16, v0
+; RV64ZFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vmv1r.v v0, v16
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv7f64_unmasked:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v16, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv7f64_unmasked:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v16, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1384,43 +1538,79 @@ define <vscale x 7 x double> @vp_ceil_vv_nxv7f64_unmasked(<vscale x 7 x double>
declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT: vmv1r.v v16, v0
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vmv1r.v v0, v16
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv8f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT: vmv1r.v v16, v0
+; RV64ZFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vmv1r.v v0, v16
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv8f64_unmasked:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v16, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv8f64_unmasked:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v16, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1429,87 +1619,167 @@ define <vscale x 8 x double> @vp_ceil_vv_nxv8f64_unmasked(<vscale x 8 x double>
declare <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a2, 3
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZFH-NEXT: vmv1r.v v7, v0
+; RV32ZFH-NEXT: csrr a1, vlenb
+; RV32ZFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZFH-NEXT: srli a3, a1, 3
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZFH-NEXT: sub a2, a0, a1
+; RV32ZFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZFH-NEXT: sltu a3, a0, a2
+; RV32ZFH-NEXT: addi a3, a3, -1
+; RV32ZFH-NEXT: and a2, a3, a2
+; RV32ZFH-NEXT: vmv1r.v v0, v6
+; RV32ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a2, 3
+; RV32ZFH-NEXT: vmv1r.v v0, v6
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZFH-NEXT: fsrm a2
+; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZFH-NEXT: # %bb.1:
+; RV32ZFH-NEXT: mv a0, a1
+; RV32ZFH-NEXT: .LBB44_2:
+; RV32ZFH-NEXT: vmv1r.v v0, v7
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vmv1r.v v0, v7
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv16f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZFH-NEXT: vmv1r.v v7, v0
+; RV64ZFH-NEXT: csrr a1, vlenb
+; RV64ZFH-NEXT: li a2, 1075
+; RV64ZFH-NEXT: srli a3, a1, 3
+; RV64ZFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZFH-NEXT: sub a3, a0, a1
+; RV64ZFH-NEXT: slli a2, a2, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a2
+; RV64ZFH-NEXT: sltu a2, a0, a3
+; RV64ZFH-NEXT: addi a2, a2, -1
+; RV64ZFH-NEXT: and a2, a2, a3
+; RV64ZFH-NEXT: vmv1r.v v0, v6
+; RV64ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a2, 3
+; RV64ZFH-NEXT: vmv1r.v v0, v6
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZFH-NEXT: fsrm a2
+; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZFH-NEXT: # %bb.1:
+; RV64ZFH-NEXT: mv a0, a1
+; RV64ZFH-NEXT: .LBB44_2:
+; RV64ZFH-NEXT: vmv1r.v v0, v7
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vmv1r.v v0, v7
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_ceil_vv_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_vv_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a2, 3
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: vp_ceil_vv_nxv16f64_unmasked:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: csrr a1, vlenb
+; RV32ZFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZFH-NEXT: sub a3, a0, a1
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZFH-NEXT: sltu a2, a0, a3
+; RV32ZFH-NEXT: addi a2, a2, -1
+; RV32ZFH-NEXT: and a2, a2, a3
+; RV32ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v24, v16
+; RV32ZFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZFH-NEXT: fsrmi a2, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZFH-NEXT: fsrm a2
+; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZFH-NEXT: # %bb.1:
+; RV32ZFH-NEXT: mv a0, a1
+; RV32ZFH-NEXT: .LBB45_2:
+; RV32ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v24, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: vp_ceil_vv_nxv16f64_unmasked:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: csrr a1, vlenb
+; RV64ZFH-NEXT: li a2, 1075
+; RV64ZFH-NEXT: sub a3, a0, a1
+; RV64ZFH-NEXT: slli a2, a2, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a2
+; RV64ZFH-NEXT: sltu a2, a0, a3
+; RV64ZFH-NEXT: addi a2, a2, -1
+; RV64ZFH-NEXT: and a2, a2, a3
+; RV64ZFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v24, v16
+; RV64ZFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZFH-NEXT: fsrmi a2, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZFH-NEXT: fsrm a2
+; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZFH-NEXT: # %bb.1:
+; RV64ZFH-NEXT: mv a0, a1
+; RV64ZFH-NEXT: .LBB45_2:
+; RV64ZFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v24, v8
+; RV64ZFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZFH-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.ceil.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
index 8c63c2d4be8c1..51dc7b0714d7f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/double-round-conv.ll
@@ -32,10 +32,11 @@ define <vscale x 1 x i8> @trunc_nxv1f64_to_si8(<vscale x 1 x double> %x) {
;
; RV64-LABEL: trunc_nxv1f64_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI0_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI0_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -75,10 +76,11 @@ define <vscale x 1 x i8> @trunc_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
;
; RV64-LABEL: trunc_nxv1f64_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI1_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI1_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -116,10 +118,11 @@ define <vscale x 1 x i16> @trunc_nxv1f64_to_si16(<vscale x 1 x double> %x) {
;
; RV64-LABEL: trunc_nxv1f64_to_si16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI2_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI2_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -155,10 +158,11 @@ define <vscale x 1 x i16> @trunc_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
;
; RV64-LABEL: trunc_nxv1f64_to_ui16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI3_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI3_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -274,10 +278,11 @@ define <vscale x 4 x i8> @trunc_nxv4f64_to_si8(<vscale x 4 x double> %x) {
;
; RV64-LABEL: trunc_nxv4f64_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI8_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI8_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -317,10 +322,11 @@ define <vscale x 4 x i8> @trunc_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
;
; RV64-LABEL: trunc_nxv4f64_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI9_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -358,10 +364,11 @@ define <vscale x 4 x i16> @trunc_nxv4f64_to_si16(<vscale x 4 x double> %x) {
;
; RV64-LABEL: trunc_nxv4f64_to_si16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI10_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI10_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -397,10 +404,11 @@ define <vscale x 4 x i16> @trunc_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
;
; RV64-LABEL: trunc_nxv4f64_to_ui16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI11_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -518,10 +526,11 @@ define <vscale x 1 x i8> @ceil_nxv1f64_to_si8(<vscale x 1 x double> %x) {
;
; RV64-LABEL: ceil_nxv1f64_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI16_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI16_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -565,10 +574,11 @@ define <vscale x 1 x i8> @ceil_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
;
; RV64-LABEL: ceil_nxv1f64_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI17_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -610,10 +620,11 @@ define <vscale x 1 x i16> @ceil_nxv1f64_to_si16(<vscale x 1 x double> %x) {
;
; RV64-LABEL: ceil_nxv1f64_to_si16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI18_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -653,10 +664,11 @@ define <vscale x 1 x i16> @ceil_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
;
; RV64-LABEL: ceil_nxv1f64_to_ui16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI19_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -792,10 +804,11 @@ define <vscale x 4 x i8> @ceil_nxv4f64_to_si8(<vscale x 4 x double> %x) {
;
; RV64-LABEL: ceil_nxv4f64_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI24_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -839,10 +852,11 @@ define <vscale x 4 x i8> @ceil_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
;
; RV64-LABEL: ceil_nxv4f64_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI25_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI25_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -884,10 +898,11 @@ define <vscale x 4 x i16> @ceil_nxv4f64_to_si16(<vscale x 4 x double> %x) {
;
; RV64-LABEL: ceil_nxv4f64_to_si16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI26_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI26_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -927,10 +942,11 @@ define <vscale x 4 x i16> @ceil_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
;
; RV64-LABEL: ceil_nxv4f64_to_ui16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI27_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI27_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: fsrmi a0, 3
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -1064,10 +1080,11 @@ define <vscale x 1 x i8> @rint_nxv1f64_to_si8(<vscale x 1 x double> %x) {
;
; RV64-LABEL: rint_nxv1f64_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI32_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI32_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -1107,10 +1124,11 @@ define <vscale x 1 x i8> @rint_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
;
; RV64-LABEL: rint_nxv1f64_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI33_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI33_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -1148,10 +1166,11 @@ define <vscale x 1 x i16> @rint_nxv1f64_to_si16(<vscale x 1 x double> %x) {
;
; RV64-LABEL: rint_nxv1f64_to_si16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI34_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI34_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -1187,10 +1206,11 @@ define <vscale x 1 x i16> @rint_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
;
; RV64-LABEL: rint_nxv1f64_to_ui16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI35_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI35_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v9, fa5
; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -1306,10 +1326,11 @@ define <vscale x 4 x i8> @rint_nxv4f64_to_si8(<vscale x 4 x double> %x) {
;
; RV64-LABEL: rint_nxv4f64_to_si8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI40_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -1349,10 +1370,11 @@ define <vscale x 4 x i8> @rint_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
;
; RV64-LABEL: rint_nxv4f64_to_ui8:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI41_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI41_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -1390,10 +1412,11 @@ define <vscale x 4 x i16> @rint_nxv4f64_to_si16(<vscale x 4 x double> %x) {
;
; RV64-LABEL: rint_nxv4f64_to_si16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI42_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -1429,10 +1452,11 @@ define <vscale x 4 x i16> @rint_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
;
; RV64-LABEL: rint_nxv4f64_to_ui16:
; RV64: # %bb.0:
-; RV64-NEXT: lui a0, %hi(.LCPI43_0)
-; RV64-NEXT: fld fa5, %lo(.LCPI43_0)(a0)
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
; RV64-NEXT: vmflt.vf v0, v12, fa5
; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
index 1626b362fed15..316a84f98be2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK-LABEL: ceil_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -32,10 +33,11 @@ define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -55,10 +57,11 @@ define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -78,10 +81,11 @@ define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -101,10 +105,11 @@ define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -124,10 +129,11 @@ define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
@@ -258,92 +264,168 @@ define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) strictfp {
declare <vscale x 16 x float> @llvm.experimental.constrained.ceil.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) strictfp {
-; CHECK-LABEL: ceil_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 1 x double> @llvm.experimental.constrained.ceil.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.ceil.nxv1f64(<vscale x 1 x double>, metadata)
define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) strictfp {
-; CHECK-LABEL: ceil_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 2 x double> @llvm.experimental.constrained.ceil.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.ceil.nxv2f64(<vscale x 2 x double>, metadata)
define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) strictfp {
-; CHECK-LABEL: ceil_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 4 x double> @llvm.experimental.constrained.ceil.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.ceil.nxv4f64(<vscale x 4 x double>, metadata)
define <vscale x 8 x double> @ceil_nxv8f64(<vscale x 8 x double> %x) strictfp {
-; CHECK-LABEL: ceil_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 8 x double> @llvm.experimental.constrained.ceil.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
index 4aca2d694dfbb..56edec1cc7a68 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFHMIN
define <vscale x 1 x bfloat> @ceil_nxv1bf16(<vscale x 1 x bfloat> %x) {
; CHECK-LABEL: ceil_nxv1bf16:
@@ -167,10 +167,11 @@ define <vscale x 32 x bfloat> @ceil_nxv32bf16(<vscale x 32 x bfloat> %x) {
define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: ceil_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -206,10 +207,11 @@ declare <vscale x 1 x half> @llvm.ceil.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: ceil_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -245,10 +247,11 @@ declare <vscale x 2 x half> @llvm.ceil.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: ceil_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -284,10 +287,11 @@ declare <vscale x 4 x half> @llvm.ceil.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: ceil_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -323,10 +327,11 @@ declare <vscale x 8 x half> @llvm.ceil.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: ceil_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -362,10 +367,11 @@ declare <vscale x 16 x half> @llvm.ceil.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: ceil_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -513,80 +519,268 @@ define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.ceil.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: ceil_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: ceil_nxv1f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZFH-NEXT: vfabs.v v9, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: ceil_nxv1f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZFH-NEXT: vfabs.v v9, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: ceil_nxv1f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 3
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: ceil_nxv1f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 3
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: ceil_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: ceil_nxv2f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZFH-NEXT: vfabs.v v10, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: ceil_nxv2f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZFH-NEXT: vfabs.v v10, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: ceil_nxv2f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 3
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: ceil_nxv2f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 3
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.ceil.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: ceil_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: ceil_nxv4f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZFH-NEXT: vfabs.v v12, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: ceil_nxv4f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZFH-NEXT: vfabs.v v12, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: ceil_nxv4f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 3
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: ceil_nxv4f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 3
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @ceil_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: ceil_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: ceil_nxv8f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v16, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZFH-NEXT: fsrmi a0, 3
+; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: ceil_nxv8f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v16, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZFH-NEXT: fsrmi a0, 3
+; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: ceil_nxv8f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 3
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: ceil_nxv8f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 3
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.ceil.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
index d93f15ec44053..7045fc7c50847 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK-LABEL: floor_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -32,10 +33,11 @@ define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -55,10 +57,11 @@ define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -78,10 +81,11 @@ define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -101,10 +105,11 @@ define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -124,10 +129,11 @@ define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
@@ -258,92 +264,168 @@ define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) strictfp
declare <vscale x 16 x float> @llvm.experimental.constrained.floor.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) strictfp {
-; CHECK-LABEL: floor_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 1 x double> @llvm.experimental.constrained.floor.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.floor.nxv1f64(<vscale x 1 x double>, metadata)
define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) strictfp {
-; CHECK-LABEL: floor_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 2 x double> @llvm.experimental.constrained.floor.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.floor.nxv2f64(<vscale x 2 x double>, metadata)
define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) strictfp {
-; CHECK-LABEL: floor_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 4 x double> @llvm.experimental.constrained.floor.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.floor.nxv4f64(<vscale x 4 x double>, metadata)
define <vscale x 8 x double> @floor_nxv8f64(<vscale x 8 x double> %x) strictfp {
-; CHECK-LABEL: floor_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 8 x double> @llvm.experimental.constrained.floor.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
index 010d7786c8891..9adbca55bcd01 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZFHMIN
define <vscale x 1 x bfloat> @floor_nxv1bf16(<vscale x 1 x bfloat> %x) {
; CHECK-LABEL: floor_nxv1bf16:
@@ -173,10 +173,11 @@ declare <vscale x 32 x bfloat> @llvm.floor.nxv32bf16(<vscale x 32 x bfloat>)
define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: floor_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -212,10 +213,11 @@ declare <vscale x 1 x half> @llvm.floor.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: floor_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -251,10 +253,11 @@ declare <vscale x 2 x half> @llvm.floor.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: floor_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -290,10 +293,11 @@ declare <vscale x 4 x half> @llvm.floor.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: floor_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -329,10 +333,11 @@ declare <vscale x 8 x half> @llvm.floor.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: floor_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -368,10 +373,11 @@ declare <vscale x 16 x half> @llvm.floor.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: floor_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -519,80 +525,268 @@ define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.floor.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: floor_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: floor_nxv1f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZFH-NEXT: vfabs.v v9, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZFH-NEXT: fsrmi a0, 2
+; RV32ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: floor_nxv1f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZFH-NEXT: vfabs.v v9, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZFH-NEXT: fsrmi a0, 2
+; RV64ZFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: floor_nxv1f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 2
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: floor_nxv1f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 2
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.floor.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.floor.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: floor_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: floor_nxv2f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZFH-NEXT: vfabs.v v10, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZFH-NEXT: fsrmi a0, 2
+; RV32ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: floor_nxv2f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZFH-NEXT: vfabs.v v10, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZFH-NEXT: fsrmi a0, 2
+; RV64ZFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: floor_nxv2f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 2
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: floor_nxv2f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 2
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.floor.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: floor_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: floor_nxv4f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZFH-NEXT: vfabs.v v12, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZFH-NEXT: fsrmi a0, 2
+; RV32ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: floor_nxv4f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZFH-NEXT: vfabs.v v12, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZFH-NEXT: fsrmi a0, 2
+; RV64ZFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: floor_nxv4f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 2
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: floor_nxv4f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 2
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.floor.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.floor.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @floor_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: floor_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZFH-LABEL: floor_nxv8f64:
+; RV32ZFH: # %bb.0:
+; RV32ZFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFH-NEXT: vfabs.v v16, v8
+; RV32ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZFH-NEXT: fsrmi a0, 2
+; RV32ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFH-NEXT: fsrm a0
+; RV32ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFH-NEXT: ret
+;
+; RV64ZFH-LABEL: floor_nxv8f64:
+; RV64ZFH: # %bb.0:
+; RV64ZFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFH-NEXT: vfabs.v v16, v8
+; RV64ZFH-NEXT: li a0, 1075
+; RV64ZFH-NEXT: slli a0, a0, 52
+; RV64ZFH-NEXT: fmv.d.x fa5, a0
+; RV64ZFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZFH-NEXT: fsrmi a0, 2
+; RV64ZFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFH-NEXT: fsrm a0
+; RV64ZFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFH-NEXT: ret
+;
+; RV32ZFHMIN-LABEL: floor_nxv8f64:
+; RV32ZFHMIN: # %bb.0:
+; RV32ZFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZFHMIN-NEXT: fsrmi a0, 2
+; RV32ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZFHMIN-NEXT: fsrm a0
+; RV32ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZFHMIN-NEXT: ret
+;
+; RV64ZFHMIN-LABEL: floor_nxv8f64:
+; RV64ZFHMIN: # %bb.0:
+; RV64ZFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZFHMIN-NEXT: li a0, 1075
+; RV64ZFHMIN-NEXT: slli a0, a0, 52
+; RV64ZFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZFHMIN-NEXT: fsrmi a0, 2
+; RV64ZFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZFHMIN-NEXT: fsrm a0
+; RV64ZFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.floor.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index c6ff39ad10d6b..4b42c517379ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <2 x half> @llvm.vp.ceil.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -59,10 +60,11 @@ define <2 x half> @vp_ceil_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.ceil.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -145,10 +148,11 @@ define <4 x half> @vp_ceil_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.ceil.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -231,10 +236,11 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -273,9 +279,10 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
@@ -319,10 +326,11 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 3
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -529,41 +537,141 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
declare <2 x double> @llvm.vp.ceil.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_ceil_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.ceil.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -571,43 +679,149 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.ceil.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -615,43 +829,149 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.ceil.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -659,43 +979,149 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v15f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v15f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v15f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v15f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v15f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v15f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v15f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.ceil.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -703,43 +1129,149 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -747,91 +1279,341 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
declare <32 x double> @llvm.vp.ceil.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a1, 3
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v32f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v6, v0
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB26_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFH-NEXT: addi a1, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a1
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 3
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 3
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v32f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v6, v0
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB26_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: addi a1, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a1
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: and a0, a0, a1
+; RV64ZVFH-NEXT: fsrmi a1, 3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 3
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v32f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB26_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFHMIN-NEXT: addi a1, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 3
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 3
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v32f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB26_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: addi a1, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: and a0, a0, a1
+; RV64ZVFHMIN-NEXT: fsrmi a1, 3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_ceil_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: fsrmi a2, 3
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsrmi a1, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_ceil_v32f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB27_2:
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFH-NEXT: addi a2, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a2
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a2
+; RV32ZVFH-NEXT: fsrmi a2, 3
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 3
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_ceil_v32f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB27_2:
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: addi a2, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a2
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: and a0, a0, a2
+; RV64ZVFH-NEXT: fsrmi a2, 3
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsrmi a1, 3
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB27_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFHMIN-NEXT: addi a2, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a2
+; RV32ZVFHMIN-NEXT: fsrmi a2, 3
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 3
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_ceil_v32f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB27_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: addi a2, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: and a0, a0, a2
+; RV64ZVFHMIN-NEXT: fsrmi a2, 3
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a1, 3
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.ceil.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
index ab2d00b9b9137..71b0624d91f22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <1 x half> @ceil_v1f16(<1 x half> %x) strictfp {
; CHECK-LABEL: ceil_v1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -32,10 +33,11 @@ define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -55,10 +57,11 @@ define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -78,10 +81,11 @@ define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -101,10 +105,11 @@ define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -123,11 +128,12 @@ define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp {
; CHECK-LABEL: ceil_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
+; CHECK-NEXT: li a1, 25
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 3
@@ -259,92 +265,168 @@ define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp {
declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata)
define <1 x double> @ceil_v1f64(<1 x double> %x) strictfp {
-; CHECK-LABEL: ceil_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_v1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
ret <1 x double> %a
}
declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata)
define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp {
-; CHECK-LABEL: ceil_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
ret <2 x double> %a
}
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp {
-; CHECK-LABEL: ceil_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
ret <4 x double> %a
}
declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp {
-; CHECK-LABEL: ceil_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: ceil_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 3
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: ceil_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 3
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <8 x double> @llvm.experimental.constrained.ceil.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
index c6ce7c1bbe8b4..9eca66eea865c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <1 x half> @floor_v1f16(<1 x half> %x) strictfp {
; CHECK-LABEL: floor_v1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -32,10 +33,11 @@ define <2 x half> @floor_v2f16(<2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -55,10 +57,11 @@ define <4 x half> @floor_v4f16(<4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -78,10 +81,11 @@ define <8 x half> @floor_v8f16(<8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -101,10 +105,11 @@ define <16 x half> @floor_v16f16(<16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 2
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -123,11 +128,12 @@ define <32 x half> @floor_v32f16(<32 x half> %x) strictfp {
; CHECK-LABEL: floor_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
+; CHECK-NEXT: li a1, 25
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 2
@@ -259,92 +265,168 @@ define <16 x float> @floor_v16f32(<16 x float> %x) strictfp {
declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
define <1 x double> @floor_v1f64(<1 x double> %x) strictfp {
-; CHECK-LABEL: floor_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_v1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
ret <1 x double> %a
}
declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
define <2 x double> @floor_v2f64(<2 x double> %x) strictfp {
-; CHECK-LABEL: floor_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
ret <2 x double> %a
}
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
define <4 x double> @floor_v4f64(<4 x double> %x) strictfp {
-; CHECK-LABEL: floor_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
ret <4 x double> %a
}
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
define <8 x double> @floor_v8f64(<8 x double> %x) strictfp {
-; CHECK-LABEL: floor_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: floor_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 2
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: floor_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 2
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <8 x double> @llvm.experimental.constrained.floor.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 6fc0165d7e77f..4494b97119403 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <2 x half> @llvm.vp.floor.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -59,10 +60,11 @@ define <2 x half> @vp_floor_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.floor.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -145,10 +148,11 @@ define <4 x half> @vp_floor_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.floor.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -231,10 +236,11 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -273,9 +279,10 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -319,10 +326,11 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -529,41 +537,141 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
declare <2 x double> @llvm.vp.floor.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_floor_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.floor.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -571,43 +679,149 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.floor.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -615,43 +829,149 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.floor.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -659,43 +979,149 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v15f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v15f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v15f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v15f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v15f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v15f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v15f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v15f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.floor.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -703,43 +1129,149 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.floor.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -747,91 +1279,341 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
declare <32 x double> @llvm.vp.floor.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a1, 2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v32f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v6, v0
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB26_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFH-NEXT: addi a1, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a1
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v32f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v6, v0
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB26_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: addi a1, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a1
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: and a0, a0, a1
+; RV64ZVFH-NEXT: fsrmi a1, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v32f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB26_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFHMIN-NEXT: addi a1, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v32f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB26_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: addi a1, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: and a0, a0, a1
+; RV64ZVFHMIN-NEXT: fsrmi a1, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: fsrmi a2, 2
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsrmi a1, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_v32f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB27_2:
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFH-NEXT: addi a2, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a2
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a2
+; RV32ZVFH-NEXT: fsrmi a2, 2
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 2
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_v32f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB27_2:
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: addi a2, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a2
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: and a0, a0, a2
+; RV64ZVFH-NEXT: fsrmi a2, 2
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsrmi a1, 2
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_v32f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB27_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFHMIN-NEXT: addi a2, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a2
+; RV32ZVFHMIN-NEXT: fsrmi a2, 2
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 2
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_v32f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB27_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: addi a2, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: and a0, a0, a2
+; RV64ZVFHMIN-NEXT: fsrmi a2, 2
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a1, 2
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.floor.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
index 3a7ded1537ef6..dd1b99bee6d55 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata)
@@ -11,10 +11,11 @@ define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -35,10 +36,11 @@ define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -59,10 +61,11 @@ define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -83,10 +86,11 @@ define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -106,11 +110,12 @@ define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp {
; CHECK-LABEL: nearbyint_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
+; CHECK-NEXT: li a1, 25
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
+; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: frflags a0
@@ -224,23 +229,42 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp {
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI9_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI9_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <2 x double> %r
}
@@ -248,23 +272,42 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp {
declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI10_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI10_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI10_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <4 x double> %r
}
@@ -272,23 +315,42 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp {
declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata)
define <8 x double> @nearbyint_v8f64(<8 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <8 x double> %r
}
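
The RV64 check lines above replace the old constant-pool load (lui of %hi(.LCPI*_0) plus fld) with the integer sequence li/slli/fmv.d.x. The following is a minimal C++20 sketch of the arithmetic behind that sequence, written for illustration and not taken from the patch: 1075 is the biased exponent of 2^52 in IEEE-754 binary64 (1023 + 52), so 1075 << 52 places it in the exponent field over a zero mantissa, and fmv.d.x reinterprets the result as the double 2^52, the smallest magnitude at which every double is already integral and hence the threshold the masked vmflt.vf compare uses. RV32 keeps the fld form because it has no 64-bit GPR to feed fmv.d.x.

#include <bit>
#include <cstdint>
#include <cstdio>

int main() {
  // li a0, 1075 ; slli a0, a0, 52  ->  0x4330000000000000
  std::uint64_t Bits = std::uint64_t{1075} << 52;
  // fmv.d.x fa5, a0  ->  reinterpret the GPR bit pattern as a double.
  double Threshold = std::bit_cast<double>(Bits);
  // 2^52 is the smallest magnitude at which every binary64 value is an
  // integer, so |x| < Threshold marks the lanes that still need rounding.
  std::printf("threshold = %.1f (2^52 = %.1f)\n", Threshold, 0x1p52);
  return Threshold == 0x1p52 ? 0 : 1;
}
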
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 38df622998bf9..dd415116c2327 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
define void @fadd_v8bf16(ptr %x, ptr %y) {
@@ -3925,8 +3925,9 @@ define void @trunc_v8f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI171_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI171_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -3965,8 +3966,9 @@ define void @trunc_v6f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI172_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI172_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -4022,20 +4024,67 @@ define void @trunc_v4f32(ptr %x) {
}
define void @trunc_v2f64(ptr %x) {
-; CHECK-LABEL: trunc_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI174_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI174_0)(a1)
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: trunc_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vle64.v v8, (a0)
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI174_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI174_0)(a1)
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: vse64.v v8, (a0)
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: trunc_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vle64.v v8, (a0)
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: vse64.v v8, (a0)
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: trunc_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI174_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI174_0)(a1)
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: trunc_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: ret
%a = load <2 x double>, ptr %x
%b = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a)
store <2 x double> %b, ptr %x
@@ -4101,8 +4150,9 @@ define void @ceil_v8f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI177_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI177_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 3
@@ -4145,8 +4195,9 @@ define void @ceil_v6f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI178_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI178_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 3
@@ -4208,22 +4259,75 @@ define void @ceil_v4f32(ptr %x) {
}
define void @ceil_v2f64(ptr %x) {
-; CHECK-LABEL: ceil_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI180_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI180_0)(a1)
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a1, 3
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: ceil_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vle64.v v8, (a0)
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI180_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI180_0)(a1)
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a1, 3
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: vse64.v v8, (a0)
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: ceil_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vle64.v v8, (a0)
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a1, 3
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: vse64.v v8, (a0)
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: ceil_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI180_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI180_0)(a1)
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a1, 3
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: ceil_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a1, 3
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: ret
%a = load <2 x double>, ptr %x
%b = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a)
store <2 x double> %b, ptr %x
@@ -4289,8 +4393,9 @@ define void @floor_v8f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI183_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI183_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 2
@@ -4333,8 +4438,9 @@ define void @floor_v6f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI184_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI184_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 2
@@ -4396,22 +4502,75 @@ define void @floor_v4f32(ptr %x) {
}
define void @floor_v2f64(ptr %x) {
-; CHECK-LABEL: floor_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI186_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI186_0)(a1)
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a1, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: floor_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vle64.v v8, (a0)
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI186_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI186_0)(a1)
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a1, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: vse64.v v8, (a0)
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: floor_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vle64.v v8, (a0)
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a1, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: vse64.v v8, (a0)
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: floor_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI186_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI186_0)(a1)
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a1, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: floor_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a1, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: ret
%a = load <2 x double>, ptr %x
%b = call <2 x double> @llvm.floor.v2f64(<2 x double> %a)
store <2 x double> %b, ptr %x
@@ -4477,8 +4636,9 @@ define void @round_v8f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI189_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI189_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 4
@@ -4521,8 +4681,9 @@ define void @round_v6f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI190_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI190_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 4
@@ -4584,22 +4745,75 @@ define void @round_v4f32(ptr %x) {
}
define void @round_v2f64(ptr %x) {
-; CHECK-LABEL: round_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI192_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI192_0)(a1)
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a1, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vle64.v v8, (a0)
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI192_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI192_0)(a1)
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a1, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: vse64.v v8, (a0)
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vle64.v v8, (a0)
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a1, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: vse64.v v8, (a0)
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI192_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI192_0)(a1)
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a1, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a1, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: ret
%a = load <2 x double>, ptr %x
%b = call <2 x double> @llvm.round.v2f64(<2 x double> %a)
store <2 x double> %b, ptr %x
@@ -4636,8 +4850,9 @@ define void @rint_v8f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI194_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI194_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -4693,20 +4908,67 @@ define void @rint_v4f32(ptr %x) {
}
define void @rint_v2f64(ptr %x) {
-; CHECK-LABEL: rint_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI196_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI196_0)(a1)
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: rint_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vle64.v v8, (a0)
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI196_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI196_0)(a1)
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: vse64.v v8, (a0)
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: rint_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vle64.v v8, (a0)
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: vse64.v v8, (a0)
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: rint_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI196_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI196_0)(a1)
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: rint_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: ret
%a = load <2 x double>, ptr %x
%b = call <2 x double> @llvm.rint.v2f64(<2 x double> %a)
store <2 x double> %b, ptr %x
@@ -4745,8 +5007,9 @@ define void @nearbyint_v8f16(ptr %x) {
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vle16.v v8, (a0)
-; ZVFH-NEXT: lui a1, %hi(.LCPI198_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI198_0)(a1)
+; ZVFH-NEXT: li a1, 25
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a1
@@ -4808,22 +5071,75 @@ define void @nearbyint_v4f32(ptr %x) {
}
define void @nearbyint_v2f64(ptr %x) {
-; CHECK-LABEL: nearbyint_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: lui a1, %hi(.LCPI200_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI200_0)(a1)
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a1
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: fsflags a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: vse64.v v8, (a0)
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: nearbyint_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vle64.v v8, (a0)
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI200_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI200_0)(a1)
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: frflags a1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: fsflags a1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: vse64.v v8, (a0)
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: nearbyint_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vle64.v v8, (a0)
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: frflags a1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: fsflags a1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: vse64.v v8, (a0)
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: nearbyint_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI200_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI200_0)(a1)
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: frflags a1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: nearbyint_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vle64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: frflags a1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vse64.v v8, (a0)
+; RV64ZVFHMIN-NEXT: ret
%a = load <2 x double>, ptr %x
%b = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a)
store <2 x double> %b, ptr %x
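
The ZVFH changes in this file make the analogous switch for f16: li a1, 25 ; slli a1, a1, 10 ; fmv.h.x fa5, a1 builds the half-precision threshold 0x6400 in a GPR instead of loading it with flh from a constant pool. Below is a short sketch decoding that pattern, again an illustration under the same assumptions rather than code from the patch: binary16 uses an exponent bias of 15, so a biased exponent of 25 over a zero mantissa encodes 2^10 = 1024.0, the magnitude above which every half value is already an integer.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // li a1, 25 ; slli a1, a1, 10  ->  0x6400, the pattern fmv.h.x moves
  // into fa5.
  std::uint16_t Bits = static_cast<std::uint16_t>(25u << 10);
  unsigned BiasedExp = (Bits >> 10) & 0x1Fu; // 25
  unsigned Mantissa = Bits & 0x3FFu;         // 0
  // Normal binary16 value: (1 + mantissa/1024) * 2^(biasedExp - 15).
  double Value = (1.0 + Mantissa / 1024.0) * std::ldexp(1.0, int(BiasedExp) - 15);
  std::printf("0x%04x -> %.1f\n", Bits, Value); // 0x6400 -> 1024.0
  return Value == 1024.0 ? 0 : 1;
}
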
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll
index be32c033fe373..c0b67dd603ebb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-constrained-sdnode.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
; This file tests the code generation for `llvm.experimental.constrained.round.*` on fixed vector types.
@@ -11,10 +11,11 @@ define <1 x half> @round_v1f16(<1 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -34,10 +35,11 @@ define <2 x half> @round_v2f16(<2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -57,10 +59,11 @@ define <4 x half> @round_v4f16(<4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -80,10 +83,11 @@ define <8 x half> @round_v8f16(<8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -103,10 +107,11 @@ define <16 x half> @round_v16f16(<16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -125,11 +130,12 @@ define <32 x half> @round_v32f16(<32 x half> %x) strictfp {
; CHECK-LABEL: round_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
+; CHECK-NEXT: li a1, 25
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 4
@@ -261,92 +267,168 @@ define <16 x float> @round_v16f32(<16 x float> %x) strictfp {
declare <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float>, metadata)
define <1 x double> @round_v1f64(<1 x double> %x) strictfp {
-; CHECK-LABEL: round_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_v1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
ret <1 x double> %a
}
declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
define <2 x double> @round_v2f64(<2 x double> %x) strictfp {
-; CHECK-LABEL: round_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
ret <2 x double> %a
}
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
define <4 x double> @round_v4f64(<4 x double> %x) strictfp {
-; CHECK-LABEL: round_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
ret <4 x double> %a
}
declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
define <8 x double> @round_v8f64(<8 x double> %x) strictfp {
-; CHECK-LABEL: round_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <8 x double> @llvm.experimental.constrained.round.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
index 774ce5c7859c9..455dc0b83c03d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
; This file tests the code generation for `llvm.round.*` on fixed vector type.
define <1 x half> @round_v1f16(<1 x half> %x) {
; ZVFH-LABEL: round_v1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -52,10 +53,11 @@ declare <1 x half> @llvm.round.v1f16(<1 x half>)
define <2 x half> @round_v2f16(<2 x half> %x) {
; ZVFH-LABEL: round_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
; ZVFH-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -91,10 +93,11 @@ declare <2 x half> @llvm.round.v2f16(<2 x half>)
define <4 x half> @round_v4f16(<4 x half> %x) {
; ZVFH-LABEL: round_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -130,10 +133,11 @@ declare <4 x half> @llvm.round.v4f16(<4 x half>)
define <8 x half> @round_v8f16(<8 x half> %x) {
; ZVFH-LABEL: round_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -169,10 +173,11 @@ declare <8 x half> @llvm.round.v8f16(<8 x half>)
define <16 x half> @round_v16f16(<16 x half> %x) {
; ZVFH-LABEL: round_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -208,11 +213,12 @@ declare <16 x half> @llvm.round.v16f16(<16 x half>)
define <32 x half> @round_v32f16(<32 x half> %x) {
; ZVFH-LABEL: round_v32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
; ZVFH-NEXT: li a0, 32
+; ZVFH-NEXT: li a1, 25
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -347,80 +353,268 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
declare <16 x float> @llvm.round.v16f32(<16 x float>)
define <1 x double> @round_v1f64(<1 x double> %x) {
-; CHECK-LABEL: round_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_v1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_v1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_v1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_v1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <1 x double> @llvm.round.v1f64(<1 x double> %x)
ret <1 x double> %a
}
declare <1 x double> @llvm.round.v1f64(<1 x double>)
define <2 x double> @round_v2f64(<2 x double> %x) {
-; CHECK-LABEL: round_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <2 x double> @llvm.round.v2f64(<2 x double> %x)
ret <2 x double> %a
}
declare <2 x double> @llvm.round.v2f64(<2 x double>)
define <4 x double> @round_v4f64(<4 x double> %x) {
-; CHECK-LABEL: round_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <4 x double> @llvm.round.v4f64(<4 x double> %x)
ret <4 x double> %a
}
declare <4 x double> @llvm.round.v4f64(<4 x double>)
define <8 x double> @round_v8f64(<8 x double> %x) {
-; CHECK-LABEL: round_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <8 x double> @llvm.round.v8f64(<8 x double> %x)
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
index 5c0279e133dfa..b1d35d3bcdc1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on scalable vector type.
@@ -11,10 +11,11 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -34,10 +35,11 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -57,10 +59,11 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -80,10 +83,11 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -103,10 +107,11 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -125,11 +130,12 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp {
; CHECK-LABEL: roundeven_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
+; CHECK-NEXT: li a1, 25
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 0
@@ -261,92 +267,168 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp {
declare <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float>, metadata)
define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_v1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
ret <1 x double> %a
}
declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
define <2 x double> @roundeven_v2f64(<2 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
ret <2 x double> %a
}
declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
ret <4 x double> %a
}
declare <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double>, metadata)
define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <8 x double> @llvm.experimental.constrained.roundeven.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
index 0b6baad127643..f8b3cb5897dfa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
; This file tests the code generation for `llvm.roundeven.*` on fixed vector type.
define <1 x half> @roundeven_v1f16(<1 x half> %x) {
; ZVFH-LABEL: roundeven_v1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -52,10 +53,11 @@ declare <1 x half> @llvm.roundeven.v1f16(<1 x half>)
define <2 x half> @roundeven_v2f16(<2 x half> %x) {
; ZVFH-LABEL: roundeven_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
; ZVFH-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -91,10 +93,11 @@ declare <2 x half> @llvm.roundeven.v2f16(<2 x half>)
define <4 x half> @roundeven_v4f16(<4 x half> %x) {
; ZVFH-LABEL: roundeven_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -130,10 +133,11 @@ declare <4 x half> @llvm.roundeven.v4f16(<4 x half>)
define <8 x half> @roundeven_v8f16(<8 x half> %x) {
; ZVFH-LABEL: roundeven_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -169,10 +173,11 @@ declare <8 x half> @llvm.roundeven.v8f16(<8 x half>)
define <16 x half> @roundeven_v16f16(<16 x half> %x) {
; ZVFH-LABEL: roundeven_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
; ZVFH-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -208,11 +213,12 @@ declare <16 x half> @llvm.roundeven.v16f16(<16 x half>)
define <32 x half> @roundeven_v32f16(<32 x half> %x) {
; ZVFH-LABEL: roundeven_v32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
; ZVFH-NEXT: li a0, 32
+; ZVFH-NEXT: li a1, 25
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: slli a1, a1, 10
+; ZVFH-NEXT: fmv.h.x fa5, a1
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -347,80 +353,268 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) {
declare <16 x float> @llvm.roundeven.v16f32(<16 x float>)
define <1 x double> @roundeven_v1f64(<1 x double> %x) {
-; CHECK-LABEL: roundeven_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_v1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_v1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_v1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_v1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <1 x double> @llvm.roundeven.v1f64(<1 x double> %x)
ret <1 x double> %a
}
declare <1 x double> @llvm.roundeven.v1f64(<1 x double>)
define <2 x double> @roundeven_v2f64(<2 x double> %x) {
-; CHECK-LABEL: roundeven_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %x)
ret <2 x double> %a
}
declare <2 x double> @llvm.roundeven.v2f64(<2 x double>)
define <4 x double> @roundeven_v4f64(<4 x double> %x) {
-; CHECK-LABEL: roundeven_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %x)
ret <4 x double> %a
}
declare <4 x double> @llvm.roundeven.v4f64(<4 x double>)
define <8 x double> @roundeven_v8f64(<8 x double> %x) {
-; CHECK-LABEL: roundeven_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %x)
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
index 2173887e85417..b7cf84fba4210 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp {
; CHECK-LABEL: trunc_v1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -30,10 +31,11 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -51,10 +53,11 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -72,10 +75,11 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -93,10 +97,11 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
@@ -113,11 +118,12 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp {
; CHECK-LABEL: trunc_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
+; CHECK-NEXT: li a1, 25
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
+; CHECK-NEXT: slli a1, a1, 10
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -237,84 +243,152 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp {
declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp {
-; CHECK-LABEL: trunc_v1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_v1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_v1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict")
ret <1 x double> %a
}
declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp {
-; CHECK-LABEL: trunc_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict")
ret <2 x double> %a
}
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp {
-; CHECK-LABEL: trunc_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double> %x, metadata !"fpexcept.strict")
ret <4 x double> %a
}
declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp {
-; CHECK-LABEL: trunc_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <8 x double> @llvm.experimental.constrained.trunc.v8f64(<8 x double> %x, metadata !"fpexcept.strict")
ret <8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index b6c441290ee45..08da7d6bc50f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
declare <2 x half> @llvm.vp.nearbyint.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: frflags a0
@@ -30,10 +31,11 @@ define <2 x half> @vp_nearbyint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
define <2 x half> @vp_nearbyint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v2f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -51,10 +53,11 @@ declare <4 x half> @llvm.vp.nearbyint.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: frflags a0
@@ -72,10 +75,11 @@ define <4 x half> @vp_nearbyint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
define <4 x half> @vp_nearbyint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -93,10 +97,11 @@ declare <8 x half> @llvm.vp.nearbyint.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: frflags a0
@@ -114,10 +119,11 @@ define <8 x half> @vp_nearbyint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
define <8 x half> @vp_nearbyint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -137,9 +143,10 @@ define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
@@ -158,10 +165,11 @@ define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
define <16 x half> @vp_nearbyint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -349,41 +357,75 @@ define <16 x float> @vp_nearbyint_v16f32_unmasked(<16 x float> %va, i32 zeroext
declare <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_nearbyint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v2f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v2f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <2 x double> @llvm.vp.nearbyint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -391,43 +433,79 @@ define <2 x double> @vp_nearbyint_v2f64_unmasked(<2 x double> %va, i32 zeroext %
declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32-NEXT: vfabs.v v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vfabs.v v12, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v4f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v4f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -435,43 +513,79 @@ define <4 x double> @vp_nearbyint_v4f64_unmasked(<4 x double> %va, i32 zeroext %
declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmv1r.v v12, v0
+; RV32-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32-NEXT: vfabs.v v16, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: vmv1r.v v12, v0
+; RV64-NEXT: vfabs.v v16, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v8f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v8f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -479,43 +593,79 @@ define <8 x double> @vp_nearbyint_v8f64_unmasked(<8 x double> %va, i32 zeroext %
declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v15f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v16, v0
+; RV32-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32-NEXT: vfabs.v v24, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v15f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v16, v0
+; RV64-NEXT: vfabs.v v24, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vmv1r.v v0, v16
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v15f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v15f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -523,43 +673,79 @@ define <15 x double> @vp_nearbyint_v15f64_unmasked(<15 x double> %va, i32 zeroex
declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v16f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v16, v0
+; RV32-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32-NEXT: vfabs.v v24, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v16f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v16, v0
+; RV64-NEXT: vfabs.v v24, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vmv1r.v v0, v16
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v16f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v16f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -567,91 +753,175 @@ define <16 x double> @vp_nearbyint_v16f64_unmasked(<16 x double> %va, i32 zeroex
declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: frflags a1
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsflags a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v32f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vmv1r.v v6, v0
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vslidedown.vi v7, v0, 2
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB26_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB26_2:
+; RV32-NEXT: vmv1r.v v0, v6
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v8, v0.t
+; RV32-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32-NEXT: frflags a1
+; RV32-NEXT: vmv1r.v v0, v6
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: fsflags a1
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v32f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vmv1r.v v6, v0
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v7, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB26_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB26_2:
+; RV64-NEXT: vmv1r.v v0, v6
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v8, v0.t
+; RV64-NEXT: li a1, 1075
+; RV64-NEXT: slli a1, a1, 52
+; RV64-NEXT: fmv.d.x fa5, a1
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: frflags a1
+; RV64-NEXT: vmv1r.v v0, v6
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: fsflags a1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_nearbyint_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: frflags a2
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsflags a2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: frflags a1
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: fsflags a1
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_nearbyint_v32f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB27_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB27_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v8
+; RV32-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32-NEXT: addi a2, a0, -16
+; RV32-NEXT: sltu a0, a0, a2
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: frflags a2
+; RV32-NEXT: vmflt.vf v0, v24, fa5
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v16
+; RV32-NEXT: vmflt.vf v7, v24, fa5
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: fsflags a2
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: frflags a1
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32-NEXT: fsflags a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_nearbyint_v32f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB27_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB27_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v8
+; RV64-NEXT: li a2, 1075
+; RV64-NEXT: slli a2, a2, 52
+; RV64-NEXT: fmv.d.x fa5, a2
+; RV64-NEXT: addi a2, a0, -16
+; RV64-NEXT: sltu a0, a0, a2
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a2
+; RV64-NEXT: frflags a2
+; RV64-NEXT: vmflt.vf v0, v24, fa5
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v16
+; RV64-NEXT: vmflt.vf v7, v24, fa5
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: fsflags a2
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: frflags a1
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64-NEXT: fsflags a1
+; RV64-NEXT: ret
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index a426f8c619e99..eec12212d0d37 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)
@@ -2083,21 +2083,38 @@ define float @vreduce_fminimum_v128f32_nonans(ptr %x) {
declare double @llvm.vector.reduce.fminimum.v2f64(<2 x double>)
define double @vreduce_fminimum_v2f64(ptr %x) {
-; CHECK-LABEL: vreduce_fminimum_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v9, v8, v8
-; CHECK-NEXT: vcpop.m a0, v9
-; CHECK-NEXT: beqz a0, .LBB123_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI123_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI123_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB123_2:
-; CHECK-NEXT: vfredmin.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fminimum_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v9, v8, v8
+; RV32-NEXT: vcpop.m a0, v9
+; RV32-NEXT: beqz a0, .LBB123_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI123_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI123_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB123_2:
+; RV32-NEXT: vfredmin.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fminimum_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v9, v8, v8
+; RV64-NEXT: vcpop.m a0, v9
+; RV64-NEXT: beqz a0, .LBB123_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB123_2:
+; RV64-NEXT: vfredmin.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <2 x double>, ptr %x
%red = call double @llvm.vector.reduce.fminimum.v2f64(<2 x double> %v)
ret double %red
@@ -2119,21 +2136,38 @@ define double @vreduce_fminimum_v2f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fminimum.v4f64(<4 x double>)
define double @vreduce_fminimum_v4f64(ptr %x) {
-; CHECK-LABEL: vreduce_fminimum_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v10, v8, v8
-; CHECK-NEXT: vcpop.m a0, v10
-; CHECK-NEXT: beqz a0, .LBB125_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI125_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI125_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB125_2:
-; CHECK-NEXT: vfredmin.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fminimum_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v10, v8, v8
+; RV32-NEXT: vcpop.m a0, v10
+; RV32-NEXT: beqz a0, .LBB125_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI125_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI125_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB125_2:
+; RV32-NEXT: vfredmin.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fminimum_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v10, v8, v8
+; RV64-NEXT: vcpop.m a0, v10
+; RV64-NEXT: beqz a0, .LBB125_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB125_2:
+; RV64-NEXT: vfredmin.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <4 x double>, ptr %x
%red = call double @llvm.vector.reduce.fminimum.v4f64(<4 x double> %v)
ret double %red
@@ -2155,21 +2189,38 @@ define double @vreduce_fminimum_v4f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fminimum.v8f64(<8 x double>)
define double @vreduce_fminimum_v8f64(ptr %x) {
-; CHECK-LABEL: vreduce_fminimum_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v12, v8, v8
-; CHECK-NEXT: vcpop.m a0, v12
-; CHECK-NEXT: beqz a0, .LBB127_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI127_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI127_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB127_2:
-; CHECK-NEXT: vfredmin.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fminimum_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v12, v8, v8
+; RV32-NEXT: vcpop.m a0, v12
+; RV32-NEXT: beqz a0, .LBB127_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI127_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI127_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB127_2:
+; RV32-NEXT: vfredmin.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fminimum_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v12, v8, v8
+; RV64-NEXT: vcpop.m a0, v12
+; RV64-NEXT: beqz a0, .LBB127_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB127_2:
+; RV64-NEXT: vfredmin.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <8 x double>, ptr %x
%red = call double @llvm.vector.reduce.fminimum.v8f64(<8 x double> %v)
ret double %red
@@ -2191,21 +2242,38 @@ define double @vreduce_fminimum_v8f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fminimum.v16f64(<16 x double>)
define double @vreduce_fminimum_v16f64(ptr %x) {
-; CHECK-LABEL: vreduce_fminimum_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v16, v8, v8
-; CHECK-NEXT: vcpop.m a0, v16
-; CHECK-NEXT: beqz a0, .LBB129_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI129_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI129_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB129_2:
-; CHECK-NEXT: vfredmin.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fminimum_v16f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v16, v8, v8
+; RV32-NEXT: vcpop.m a0, v16
+; RV32-NEXT: beqz a0, .LBB129_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI129_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI129_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB129_2:
+; RV32-NEXT: vfredmin.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fminimum_v16f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v16, v8, v8
+; RV64-NEXT: vcpop.m a0, v16
+; RV64-NEXT: beqz a0, .LBB129_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB129_2:
+; RV64-NEXT: vfredmin.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <16 x double>, ptr %x
%red = call double @llvm.vector.reduce.fminimum.v16f64(<16 x double> %v)
ret double %red
@@ -2227,29 +2295,54 @@ define double @vreduce_fminimum_v16f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fminimum.v32f64(<32 x double>)
define double @vreduce_fminimum_v32f64(ptr %x) {
-; CHECK-LABEL: vreduce_fminimum_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v7, v24, v24
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT: vfmin.vv v8, v16, v8
-; CHECK-NEXT: vmfne.vv v16, v8, v8
-; CHECK-NEXT: vcpop.m a0, v16
-; CHECK-NEXT: beqz a0, .LBB131_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI131_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI131_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB131_2:
-; CHECK-NEXT: vfredmin.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fminimum_v32f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi a1, a0, 128
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v16, (a0)
+; RV32-NEXT: vle64.v v24, (a1)
+; RV32-NEXT: vmfeq.vv v0, v16, v16
+; RV32-NEXT: vmfeq.vv v7, v24, v24
+; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV32-NEXT: vfmin.vv v8, v16, v8
+; RV32-NEXT: vmfne.vv v16, v8, v8
+; RV32-NEXT: vcpop.m a0, v16
+; RV32-NEXT: beqz a0, .LBB131_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI131_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI131_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB131_2:
+; RV32-NEXT: vfredmin.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fminimum_v32f64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi a1, a0, 128
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: vle64.v v24, (a1)
+; RV64-NEXT: vmfeq.vv v0, v16, v16
+; RV64-NEXT: vmfeq.vv v7, v24, v24
+; RV64-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV64-NEXT: vfmin.vv v8, v16, v8
+; RV64-NEXT: vmfne.vv v16, v8, v8
+; RV64-NEXT: vcpop.m a0, v16
+; RV64-NEXT: beqz a0, .LBB131_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB131_2:
+; RV64-NEXT: vfredmin.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <32 x double>, ptr %x
%red = call double @llvm.vector.reduce.fminimum.v32f64(<32 x double> %v)
ret double %red
@@ -2274,85 +2367,166 @@ define double @vreduce_fminimum_v32f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fminimum.v64f64(<64 x double>)
define double @vreduce_fminimum_v64f64(ptr %x) {
-; CHECK-LABEL: vreduce_fminimum_v64f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, a0, 384
-; CHECK-NEXT: vle64.v v16, (a1)
-; CHECK-NEXT: addi a1, a0, 256
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v7, v16, v16
-; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vle64.v v8, (a1)
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfmin.vv v24, v16, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v7, v8, v8
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfmin.vv v16, v8, v16
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v7, v24, v24
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT: vfmin.vv v8, v16, v8
-; CHECK-NEXT: vmfne.vv v16, v8, v8
-; CHECK-NEXT: vcpop.m a0, v16
-; CHECK-NEXT: beqz a0, .LBB133_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI133_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI133_0)(a0)
-; CHECK-NEXT: j .LBB133_3
-; CHECK-NEXT: .LBB133_2:
-; CHECK-NEXT: vfredmin.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: .LBB133_3:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fminimum_v64f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT: addi a1, a0, 128
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a1)
+; RV32-NEXT: addi a1, a0, 384
+; RV32-NEXT: vle64.v v16, (a1)
+; RV32-NEXT: addi a1, a0, 256
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vmfeq.vv v0, v24, v24
+; RV32-NEXT: vmfeq.vv v7, v16, v16
+; RV32-NEXT: vmerge.vvm v8, v24, v16, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vle64.v v8, (a1)
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vmerge.vvm v16, v16, v24, v0
+; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vfmin.vv v24, v16, v24
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vmfeq.vv v0, v16, v16
+; RV32-NEXT: vmfeq.vv v7, v8, v8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vmerge.vvm v16, v16, v8, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vfmin.vv v16, v8, v16
+; RV32-NEXT: vmfeq.vv v0, v16, v16
+; RV32-NEXT: vmfeq.vv v7, v24, v24
+; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV32-NEXT: vfmin.vv v8, v16, v8
+; RV32-NEXT: vmfne.vv v16, v8, v8
+; RV32-NEXT: vcpop.m a0, v16
+; RV32-NEXT: beqz a0, .LBB133_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI133_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI133_0)(a0)
+; RV32-NEXT: j .LBB133_3
+; RV32-NEXT: .LBB133_2:
+; RV32-NEXT: vfredmin.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: .LBB133_3:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fminimum_v64f64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: addi a1, a0, 128
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v24, (a1)
+; RV64-NEXT: addi a1, a0, 384
+; RV64-NEXT: vle64.v v16, (a1)
+; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV64-NEXT: vmfeq.vv v0, v24, v24
+; RV64-NEXT: vmfeq.vv v7, v16, v16
+; RV64-NEXT: vmerge.vvm v8, v24, v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV64-NEXT: vle64.v v8, (a1)
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vmerge.vvm v16, v16, v24, v0
+; RV64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vfmin.vv v24, v16, v24
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vmfeq.vv v0, v16, v16
+; RV64-NEXT: vmfeq.vv v7, v8, v8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vmerge.vvm v16, v16, v8, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vmerge.vvm v8, v8, v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vfmin.vv v16, v8, v16
+; RV64-NEXT: vmfeq.vv v0, v16, v16
+; RV64-NEXT: vmfeq.vv v7, v24, v24
+; RV64-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV64-NEXT: vfmin.vv v8, v16, v8
+; RV64-NEXT: vmfne.vv v16, v8, v8
+; RV64-NEXT: vcpop.m a0, v16
+; RV64-NEXT: beqz a0, .LBB133_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: j .LBB133_3
+; RV64-NEXT: .LBB133_2:
+; RV64-NEXT: vfredmin.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: .LBB133_3:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
%v = load <64 x double>, ptr %x
%red = call double @llvm.vector.reduce.fminimum.v64f64(<64 x double> %v)
ret double %red
@@ -2765,21 +2939,38 @@ define float @vreduce_fmaximum_v128f32_nonans(ptr %x) {
declare double @llvm.vector.reduce.fmaximum.v2f64(<2 x double>)
define double @vreduce_fmaximum_v2f64(ptr %x) {
-; CHECK-LABEL: vreduce_fmaximum_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v9, v8, v8
-; CHECK-NEXT: vcpop.m a0, v9
-; CHECK-NEXT: beqz a0, .LBB151_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI151_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI151_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB151_2:
-; CHECK-NEXT: vfredmax.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fmaximum_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v9, v8, v8
+; RV32-NEXT: vcpop.m a0, v9
+; RV32-NEXT: beqz a0, .LBB151_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI151_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI151_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB151_2:
+; RV32-NEXT: vfredmax.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fmaximum_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v9, v8, v8
+; RV64-NEXT: vcpop.m a0, v9
+; RV64-NEXT: beqz a0, .LBB151_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB151_2:
+; RV64-NEXT: vfredmax.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <2 x double>, ptr %x
%red = call double @llvm.vector.reduce.fmaximum.v2f64(<2 x double> %v)
ret double %red
@@ -2801,21 +2992,38 @@ define double @vreduce_fmaximum_v2f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fmaximum.v4f64(<4 x double>)
define double @vreduce_fmaximum_v4f64(ptr %x) {
-; CHECK-LABEL: vreduce_fmaximum_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v10, v8, v8
-; CHECK-NEXT: vcpop.m a0, v10
-; CHECK-NEXT: beqz a0, .LBB153_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI153_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI153_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB153_2:
-; CHECK-NEXT: vfredmax.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fmaximum_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v10, v8, v8
+; RV32-NEXT: vcpop.m a0, v10
+; RV32-NEXT: beqz a0, .LBB153_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI153_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI153_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB153_2:
+; RV32-NEXT: vfredmax.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fmaximum_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v10, v8, v8
+; RV64-NEXT: vcpop.m a0, v10
+; RV64-NEXT: beqz a0, .LBB153_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB153_2:
+; RV64-NEXT: vfredmax.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <4 x double>, ptr %x
%red = call double @llvm.vector.reduce.fmaximum.v4f64(<4 x double> %v)
ret double %red
@@ -2837,21 +3045,38 @@ define double @vreduce_fmaximum_v4f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fmaximum.v8f64(<8 x double>)
define double @vreduce_fmaximum_v8f64(ptr %x) {
-; CHECK-LABEL: vreduce_fmaximum_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v12, v8, v8
-; CHECK-NEXT: vcpop.m a0, v12
-; CHECK-NEXT: beqz a0, .LBB155_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI155_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI155_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB155_2:
-; CHECK-NEXT: vfredmax.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fmaximum_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v12, v8, v8
+; RV32-NEXT: vcpop.m a0, v12
+; RV32-NEXT: beqz a0, .LBB155_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI155_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI155_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB155_2:
+; RV32-NEXT: vfredmax.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fmaximum_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v12, v8, v8
+; RV64-NEXT: vcpop.m a0, v12
+; RV64-NEXT: beqz a0, .LBB155_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB155_2:
+; RV64-NEXT: vfredmax.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <8 x double>, ptr %x
%red = call double @llvm.vector.reduce.fmaximum.v8f64(<8 x double> %v)
ret double %red
@@ -2873,21 +3098,38 @@ define double @vreduce_fmaximum_v8f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fmaximum.v16f64(<16 x double>)
define double @vreduce_fmaximum_v16f64(ptr %x) {
-; CHECK-LABEL: vreduce_fmaximum_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vmfne.vv v16, v8, v8
-; CHECK-NEXT: vcpop.m a0, v16
-; CHECK-NEXT: beqz a0, .LBB157_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI157_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI157_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB157_2:
-; CHECK-NEXT: vfredmax.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fmaximum_v16f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: vmfne.vv v16, v8, v8
+; RV32-NEXT: vcpop.m a0, v16
+; RV32-NEXT: beqz a0, .LBB157_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI157_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI157_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB157_2:
+; RV32-NEXT: vfredmax.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fmaximum_v16f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: vmfne.vv v16, v8, v8
+; RV64-NEXT: vcpop.m a0, v16
+; RV64-NEXT: beqz a0, .LBB157_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB157_2:
+; RV64-NEXT: vfredmax.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <16 x double>, ptr %x
%red = call double @llvm.vector.reduce.fmaximum.v16f64(<16 x double> %v)
ret double %red
@@ -2909,29 +3151,54 @@ define double @vreduce_fmaximum_v16f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fmaximum.v32f64(<32 x double>)
define double @vreduce_fmaximum_v32f64(ptr %x) {
-; CHECK-LABEL: vreduce_fmaximum_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a0)
-; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v7, v24, v24
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT: vfmax.vv v8, v16, v8
-; CHECK-NEXT: vmfne.vv v16, v8, v8
-; CHECK-NEXT: vcpop.m a0, v16
-; CHECK-NEXT: beqz a0, .LBB159_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI159_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI159_0)(a0)
-; CHECK-NEXT: ret
-; CHECK-NEXT: .LBB159_2:
-; CHECK-NEXT: vfredmax.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fmaximum_v32f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi a1, a0, 128
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v16, (a0)
+; RV32-NEXT: vle64.v v24, (a1)
+; RV32-NEXT: vmfeq.vv v0, v16, v16
+; RV32-NEXT: vmfeq.vv v7, v24, v24
+; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV32-NEXT: vfmax.vv v8, v16, v8
+; RV32-NEXT: vmfne.vv v16, v8, v8
+; RV32-NEXT: vcpop.m a0, v16
+; RV32-NEXT: beqz a0, .LBB159_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI159_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI159_0)(a0)
+; RV32-NEXT: ret
+; RV32-NEXT: .LBB159_2:
+; RV32-NEXT: vfredmax.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fmaximum_v32f64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi a1, a0, 128
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: vle64.v v24, (a1)
+; RV64-NEXT: vmfeq.vv v0, v16, v16
+; RV64-NEXT: vmfeq.vv v7, v24, v24
+; RV64-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV64-NEXT: vfmax.vv v8, v16, v8
+; RV64-NEXT: vmfne.vv v16, v8, v8
+; RV64-NEXT: vcpop.m a0, v16
+; RV64-NEXT: beqz a0, .LBB159_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: ret
+; RV64-NEXT: .LBB159_2:
+; RV64-NEXT: vfredmax.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: ret
%v = load <32 x double>, ptr %x
%red = call double @llvm.vector.reduce.fmaximum.v32f64(<32 x double> %v)
ret double %red
@@ -2956,85 +3223,166 @@ define double @vreduce_fmaximum_v32f64_nonans(ptr %x) {
declare double @llvm.vector.reduce.fmaximum.v64f64(<64 x double>)
define double @vreduce_fmaximum_v64f64(ptr %x) {
-; CHECK-LABEL: vreduce_fmaximum_v64f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, a0, 384
-; CHECK-NEXT: vle64.v v16, (a1)
-; CHECK-NEXT: addi a1, a0, 256
-; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vmfeq.vv v0, v24, v24
-; CHECK-NEXT: vmfeq.vv v7, v16, v16
-; CHECK-NEXT: vmerge.vvm v8, v24, v16, v0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vle64.v v8, (a1)
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
-; CHECK-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfmax.vv v24, v16, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v7, v8, v8
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfmax.vv v16, v8, v16
-; CHECK-NEXT: vmfeq.vv v0, v16, v16
-; CHECK-NEXT: vmfeq.vv v7, v24, v24
-; CHECK-NEXT: vmerge.vvm v8, v16, v24, v0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
-; CHECK-NEXT: vfmax.vv v8, v16, v8
-; CHECK-NEXT: vmfne.vv v16, v8, v8
-; CHECK-NEXT: vcpop.m a0, v16
-; CHECK-NEXT: beqz a0, .LBB161_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: lui a0, %hi(.LCPI161_0)
-; CHECK-NEXT: fld fa0, %lo(.LCPI161_0)(a0)
-; CHECK-NEXT: j .LBB161_3
-; CHECK-NEXT: .LBB161_2:
-; CHECK-NEXT: vfredmax.vs v8, v8, v8
-; CHECK-NEXT: vfmv.f.s fa0, v8
-; CHECK-NEXT: .LBB161_3:
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; RV32-LABEL: vreduce_fmaximum_v64f64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV32-NEXT: addi a1, a0, 128
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a1)
+; RV32-NEXT: addi a1, a0, 384
+; RV32-NEXT: vle64.v v16, (a1)
+; RV32-NEXT: addi a1, a0, 256
+; RV32-NEXT: vle64.v v8, (a0)
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vmfeq.vv v0, v24, v24
+; RV32-NEXT: vmfeq.vv v7, v16, v16
+; RV32-NEXT: vmerge.vvm v8, v24, v16, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vle64.v v8, (a1)
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vmerge.vvm v16, v16, v24, v0
+; RV32-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vfmax.vv v24, v16, v24
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vmfeq.vv v0, v16, v16
+; RV32-NEXT: vmfeq.vv v7, v8, v8
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vmerge.vvm v16, v16, v8, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV32-NEXT: vfmax.vv v16, v8, v16
+; RV32-NEXT: vmfeq.vv v0, v16, v16
+; RV32-NEXT: vmfeq.vv v7, v24, v24
+; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV32-NEXT: vfmax.vv v8, v16, v8
+; RV32-NEXT: vmfne.vv v16, v8, v8
+; RV32-NEXT: vcpop.m a0, v16
+; RV32-NEXT: beqz a0, .LBB161_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: lui a0, %hi(.LCPI161_0)
+; RV32-NEXT: fld fa0, %lo(.LCPI161_0)(a0)
+; RV32-NEXT: j .LBB161_3
+; RV32-NEXT: .LBB161_2:
+; RV32-NEXT: vfredmax.vs v8, v8, v8
+; RV32-NEXT: vfmv.f.s fa0, v8
+; RV32-NEXT: .LBB161_3:
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: .cfi_def_cfa sp, 16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: .cfi_def_cfa_offset 0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vreduce_fmaximum_v64f64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: addi a1, a0, 128
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vle64.v v24, (a1)
+; RV64-NEXT: addi a1, a0, 384
+; RV64-NEXT: vle64.v v16, (a1)
+; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV64-NEXT: vmfeq.vv v0, v24, v24
+; RV64-NEXT: vmfeq.vv v7, v16, v16
+; RV64-NEXT: vmerge.vvm v8, v24, v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v8, (a0) # vscale x 64-byte Folded Spill
+; RV64-NEXT: vle64.v v8, (a1)
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vmerge.vvm v16, v16, v24, v0
+; RV64-NEXT: vl8r.v v24, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vfmax.vv v24, v16, v24
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vmfeq.vv v0, v16, v16
+; RV64-NEXT: vmfeq.vv v7, v8, v8
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vmerge.vvm v16, v16, v8, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vmerge.vvm v8, v8, v16, v0
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; RV64-NEXT: vfmax.vv v16, v8, v16
+; RV64-NEXT: vmfeq.vv v0, v16, v16
+; RV64-NEXT: vmfeq.vv v7, v24, v24
+; RV64-NEXT: vmerge.vvm v8, v16, v24, v0
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vmerge.vvm v16, v24, v16, v0
+; RV64-NEXT: vfmax.vv v8, v16, v8
+; RV64-NEXT: vmfne.vv v16, v8, v8
+; RV64-NEXT: vcpop.m a0, v16
+; RV64-NEXT: beqz a0, .LBB161_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: lui a0, 4095
+; RV64-NEXT: slli a0, a0, 39
+; RV64-NEXT: fmv.d.x fa0, a0
+; RV64-NEXT: j .LBB161_3
+; RV64-NEXT: .LBB161_2:
+; RV64-NEXT: vfredmax.vs v8, v8, v8
+; RV64-NEXT: vfmv.f.s fa0, v8
+; RV64-NEXT: .LBB161_3:
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: .cfi_def_cfa sp, 16
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: .cfi_def_cfa_offset 0
+; RV64-NEXT: ret
%v = load <64 x double>, ptr %x
%red = call double @llvm.vector.reduce.fmaximum.v64f64(<64 x double> %v)
ret double %red
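
An aside on the RV64 path in the fmaximum reduction above: where the RV32 sequence still loads the NaN result from the constant pool (.LCPI161_0), the RV64 sequence materializes it inline as "lui a0, 4095" followed by "slli a0, a0, 39", i.e. 0xFFF << 51 = 0x7FF8000000000000, the canonical quiet NaN returned when vcpop.m finds an unordered lane. A minimal standalone C++ sketch, not part of the patch and purely illustrative, checking that bit pattern:

// Reproduce the RV64 inline materialization:
//   lui a0, 4095 ; slli a0, a0, 39 ; fmv.d.x fa0, a0
// The resulting bits should decode to a quiet NaN.
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t Bits = 4095;   // lui a0, 4095 places 4095 << 12 in a0
  Bits <<= 12;
  Bits <<= 39;            // slli a0, a0, 39
  double Val;
  std::memcpy(&Val, &Bits, sizeof(Val)); // fmv.d.x fa0, a0
  std::printf("bits=0x%016llx isnan=%d\n",
              static_cast<unsigned long long>(Bits), std::isnan(Val));
  // Expected: bits=0x7ff8000000000000 isnan=1
  return 0;
}
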
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 35cd789acfcc8..97cf7e6902e32 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
declare <2 x half> @llvm.vp.rint.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -28,10 +29,11 @@ define <2 x half> @vp_rint_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
define <2 x half> @vp_rint_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v2f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -47,10 +49,11 @@ declare <4 x half> @llvm.vp.rint.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -66,10 +69,11 @@ define <4 x half> @vp_rint_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
define <4 x half> @vp_rint_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -85,10 +89,11 @@ declare <8 x half> @llvm.vp.rint.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -104,10 +109,11 @@ define <8 x half> @vp_rint_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
define <8 x half> @vp_rint_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -125,9 +131,10 @@ define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vfabs.v v12, v8, v0.t
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@@ -144,10 +151,11 @@ define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
define <16 x half> @vp_rint_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f16_unmasked:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -317,37 +325,67 @@ define <16 x float> @vp_rint_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
declare <2 x double> @llvm.vp.rint.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_rint_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v2f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v2f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%v = call <2 x double> @llvm.vp.rint.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -355,39 +393,71 @@ define <2 x double> @vp_rint_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmv1r.v v10, v0
+; RV32-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32-NEXT: vfabs.v v12, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: vmv1r.v v10, v0
+; RV64-NEXT: vfabs.v v12, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v4f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v4f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%v = call <4 x double> @llvm.vp.rint.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -395,39 +465,71 @@ define <4 x double> @vp_rint_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmv1r.v v12, v0
+; RV32-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32-NEXT: vfabs.v v16, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: vmv1r.v v12, v0
+; RV64-NEXT: vfabs.v v16, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v8f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v8f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%v = call <8 x double> @llvm.vp.rint.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -435,39 +537,71 @@ define <8 x double> @vp_rint_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v15f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v16, v0
+; RV32-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32-NEXT: vfabs.v v24, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v15f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v16, v0
+; RV64-NEXT: vfabs.v v24, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64-NEXT: vmv1r.v v0, v16
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: ret
%v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v15f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v15f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%v = call <15 x double> @llvm.vp.rint.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -475,39 +609,71 @@ define <15 x double> @vp_rint_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v16f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v16, v0
+; RV32-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32-NEXT: vfabs.v v24, v8, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v16f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v16, v0
+; RV64-NEXT: vfabs.v v24, v8, v0.t
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64-NEXT: vmv1r.v v0, v16
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: ret
%v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v16f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v16f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%v = call <16 x double> @llvm.vp.rint.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -515,83 +681,159 @@ define <16 x double> @vp_rint_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v32f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: vmv1r.v v6, v0
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vslidedown.vi v7, v0, 2
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB26_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB26_2:
+; RV32-NEXT: vmv1r.v v0, v6
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v8, v0.t
+; RV32-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32-NEXT: addi a1, a0, -16
+; RV32-NEXT: sltu a0, a0, a1
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32-NEXT: and a0, a0, a1
+; RV32-NEXT: vmv1r.v v0, v6
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v32f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: vmv1r.v v6, v0
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v7, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB26_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB26_2:
+; RV64-NEXT: vmv1r.v v0, v6
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v8, v0.t
+; RV64-NEXT: li a1, 1075
+; RV64-NEXT: slli a1, a1, 52
+; RV64-NEXT: fmv.d.x fa5, a1
+; RV64-NEXT: addi a1, a0, -16
+; RV64-NEXT: sltu a0, a0, a1
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64-NEXT: and a0, a0, a1
+; RV64-NEXT: vmv1r.v v0, v6
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64-NEXT: ret
%v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: vp_rint_v32f64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB27_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB27_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v8
+; RV32-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32-NEXT: addi a2, a0, -16
+; RV32-NEXT: sltu a0, a0, a2
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: vmflt.vf v0, v24, fa5
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfabs.v v24, v16
+; RV32-NEXT: vmflt.vf v7, v24, fa5
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_rint_v32f64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB27_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB27_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v8
+; RV64-NEXT: li a2, 1075
+; RV64-NEXT: slli a2, a2, 52
+; RV64-NEXT: fmv.d.x fa5, a2
+; RV64-NEXT: addi a2, a0, -16
+; RV64-NEXT: sltu a0, a0, a2
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: vmflt.vf v0, v24, fa5
+; RV64-NEXT: and a0, a0, a2
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfabs.v v24, v16
+; RV64-NEXT: vmflt.vf v7, v24, fa5
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64-NEXT: ret
%v = call <32 x double> @llvm.vp.rint.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
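
A note on the new RV64 check lines in this file and in the fixed-vectors-round-vp.ll changes below: the rounding threshold is no longer loaded from a constant pool but built in an integer register, "li a0, 1075 ; slli a0, a0, 52" for f64 and "li a0, 25 ; slli a0, a0, 10" for f16. Those bit patterns are 2^52 and 2^10 respectively, the magnitudes at and above which every finite value of the type is already integral, which matches the vmflt.vf guard that restricts the convert-and-copysign sequence to small-magnitude lanes. A small standalone C++ sketch, not part of the patch, verifying both constants:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // f64: li a0, 1075 ; slli a0, a0, 52 ; fmv.d.x fa5, a0
  uint64_t DBits = 1075ull << 52; // 0x4330000000000000
  double D;
  std::memcpy(&D, &DBits, sizeof(D));
  std::printf("f64 bits 0x%016llx = %.1f (2^52)\n",
              static_cast<unsigned long long>(DBits), D);

  // f16: li a0, 25 ; slli a0, a0, 10 ; fmv.h.x fa5, a0
  // 0x6400: sign 0, exponent field 25 (bias 15), mantissa 0 -> 2^10 = 1024.0
  unsigned HBits = 25u << 10;
  double H = std::ldexp(1.0, static_cast<int>(HBits >> 10) - 15);
  std::printf("f16 bits 0x%04x = %.1f (2^10)\n", HBits, H);
  return 0;
}
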
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index d8ff7062f033e..16c8b2b9da682 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <2 x half> @llvm.vp.round.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -59,10 +60,11 @@ define <2 x half> @vp_round_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl)
define <2 x half> @vp_round_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.round.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -145,10 +148,11 @@ define <4 x half> @vp_round_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl)
define <4 x half> @vp_round_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.round.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -231,10 +236,11 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
define <8 x half> @vp_round_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -273,9 +279,10 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -319,10 +326,11 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
define <16 x half> @vp_round_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -529,41 +537,141 @@ define <16 x float> @vp_round_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
declare <2 x double> @llvm.vp.round.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_round_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.round.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -571,43 +679,149 @@ define <2 x double> @vp_round_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.round.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -615,43 +829,149 @@ define <4 x double> @vp_round_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.round.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -659,43 +979,149 @@ define <8 x double> @vp_round_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v15f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v15f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v15f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v15f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v15f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v15f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v15f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v15f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.round.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -703,43 +1129,149 @@ define <15 x double> @vp_round_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.round.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -747,91 +1279,341 @@ define <16 x double> @vp_round_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
declare <32 x double> @llvm.vp.round.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a1, 4
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v32f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v6, v0
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB26_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFH-NEXT: addi a1, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a1
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v32f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v6, v0
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB26_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: addi a1, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a1
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: and a0, a0, a1
+; RV64ZVFH-NEXT: fsrmi a1, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v32f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB26_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFHMIN-NEXT: addi a1, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v32f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB26_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: addi a1, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: and a0, a0, a1
+; RV64ZVFHMIN-NEXT: fsrmi a1, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_round_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: fsrmi a2, 4
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsrmi a1, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_v32f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB27_2:
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFH-NEXT: addi a2, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a2
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a2
+; RV32ZVFH-NEXT: fsrmi a2, 4
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 4
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_v32f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB27_2:
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: addi a2, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a2
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: and a0, a0, a2
+; RV64ZVFH-NEXT: fsrmi a2, 4
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsrmi a1, 4
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_v32f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB27_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFHMIN-NEXT: addi a2, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a2
+; RV32ZVFHMIN-NEXT: fsrmi a2, 4
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 4
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_v32f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB27_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: addi a2, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: and a0, a0, a2
+; RV64ZVFHMIN-NEXT: fsrmi a2, 4
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a1, 4
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.round.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 2649f234375d2..14c550d555cf7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <2 x half> @llvm.vp.roundeven.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -59,10 +60,11 @@ define <2 x half> @vp_roundeven_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %
define <2 x half> @vp_roundeven_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.roundeven.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -145,10 +148,11 @@ define <4 x half> @vp_roundeven_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %
define <4 x half> @vp_roundeven_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.roundeven.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -231,10 +236,11 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
define <8 x half> @vp_roundeven_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -273,9 +279,10 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -319,10 +326,11 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
define <16 x half> @vp_roundeven_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -529,41 +537,141 @@ define <16 x float> @vp_roundeven_v16f32_unmasked(<16 x float> %va, i32 zeroext
declare <2 x double> @llvm.vp.roundeven.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_roundeven_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.roundeven.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -571,43 +679,149 @@ define <2 x double> @vp_roundeven_v2f64_unmasked(<2 x double> %va, i32 zeroext %
declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.roundeven.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -615,43 +829,149 @@ define <4 x double> @vp_roundeven_v4f64_unmasked(<4 x double> %va, i32 zeroext %
declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.roundeven.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -659,43 +979,149 @@ define <8 x double> @vp_roundeven_v8f64_unmasked(<8 x double> %va, i32 zeroext %
declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v15f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v15f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v15f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v15f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v15f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v15f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v15f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.roundeven.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -703,43 +1129,149 @@ define <15 x double> @vp_roundeven_v15f64_unmasked(<15 x double> %va, i32 zeroex
declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -747,91 +1279,341 @@ define <16 x double> @vp_roundeven_v16f64_unmasked(<16 x double> %va, i32 zeroex
declare <32 x double> @llvm.vp.roundeven.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a1, 0
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v32f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v6, v0
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB26_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFH-NEXT: addi a1, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a1
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v32f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v6, v0
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB26_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: addi a1, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a1
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: and a0, a0, a1
+; RV64ZVFH-NEXT: fsrmi a1, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v32f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB26_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFHMIN-NEXT: addi a1, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v32f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB26_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: addi a1, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: and a0, a0, a1
+; RV64ZVFHMIN-NEXT: fsrmi a1, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_roundeven_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: fsrmi a2, 0
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsrmi a1, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_v32f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB27_2:
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFH-NEXT: addi a2, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a2
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a2
+; RV32ZVFH-NEXT: fsrmi a2, 0
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 0
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_v32f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB27_2:
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: addi a2, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a2
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: and a0, a0, a2
+; RV64ZVFH-NEXT: fsrmi a2, 0
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsrmi a1, 0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB27_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFHMIN-NEXT: addi a2, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a2
+; RV32ZVFHMIN-NEXT: fsrmi a2, 0
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 0
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_v32f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB27_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: addi a2, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: and a0, a0, a2
+; RV64ZVFHMIN-NEXT: fsrmi a2, 0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a1, 0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.roundeven.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index 50e65b62e7848..16f04f14721d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -1,22 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <2 x half> @llvm.vp.roundtozero.v2f16(<2 x half>, <2 x i1>, i32)
define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI0_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI0_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -59,10 +60,11 @@ define <2 x half> @vp_roundtozero_v2f16(<2 x half> %va, <2 x i1> %m, i32 zeroext
define <2 x half> @vp_roundtozero_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI1_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI1_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -99,10 +101,11 @@ declare <4 x half> @llvm.vp.roundtozero.v4f16(<4 x half>, <4 x i1>, i32)
define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI2_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI2_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -145,10 +148,11 @@ define <4 x half> @vp_roundtozero_v4f16(<4 x half> %va, <4 x i1> %m, i32 zeroext
define <4 x half> @vp_roundtozero_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI3_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI3_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -185,10 +189,11 @@ declare <8 x half> @llvm.vp.roundtozero.v8f16(<8 x half>, <8 x i1>, i32)
define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI4_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -231,10 +236,11 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
define <8 x half> @vp_roundtozero_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI5_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -273,9 +279,10 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -319,10 +326,11 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
define <16 x half> @vp_roundtozero_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -529,41 +537,141 @@ define <16 x float> @vp_roundtozero_v16f32_unmasked(<16 x float> %va, i32 zeroex
declare <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double>, <2 x i1>, i32)
define <2 x double> @vp_roundtozero_v2f64(<2 x double> %va, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI16_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI16_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI16_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> %m, i32 %evl)
ret <2 x double> %v
}
define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <2 x double> @llvm.vp.roundtozero.v2f64(<2 x double> %va, <2 x i1> splat (i1 true), i32 %evl)
ret <2 x double> %v
}
@@ -571,43 +679,149 @@ define <2 x double> @vp_roundtozero_v2f64_unmasked(<2 x double> %va, i32 zeroext
declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> %m, i32 %evl)
ret <4 x double> %v
}
define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double> %va, <4 x i1> splat (i1 true), i32 %evl)
ret <4 x double> %v
}
@@ -615,43 +829,149 @@ define <4 x double> @vp_roundtozero_v4f64_unmasked(<4 x double> %va, i32 zeroext
declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> %m, i32 %evl)
ret <8 x double> %v
}
define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI21_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI21_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double> %va, <8 x i1> splat (i1 true), i32 %evl)
ret <8 x double> %v
}
@@ -659,43 +979,149 @@ define <8 x double> @vp_roundtozero_v8f64_unmasked(<8 x double> %va, i32 zeroext
declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v15f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v15f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v15f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v15f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI22_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI22_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v15f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> %m, i32 %evl)
ret <15 x double> %v
}
define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v15f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v15f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v15f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI23_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI23_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v15f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double> %va, <15 x i1> splat (i1 true), i32 %evl)
ret <15 x double> %v
}
@@ -703,43 +1129,149 @@ define <15 x double> @vp_roundtozero_v15f64_unmasked(<15 x double> %va, i32 zero
declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI24_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI24_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI24_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> %m, i32 %evl)
ret <16 x double> %v
}
define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI25_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI25_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> %va, <16 x i1> splat (i1 true), i32 %evl)
ret <16 x double> %v
}
@@ -747,91 +1279,341 @@ define <16 x double> @vp_roundtozero_v16f64_unmasked(<16 x double> %va, i32 zero
declare <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v32f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v6, v0
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB26_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v32f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v6, v0
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB26_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFH-NEXT: addi a1, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a1
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v32f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v6, v0
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB26_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a1, 1075
+; RV64ZVFH-NEXT: slli a1, a1, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a1
+; RV64ZVFH-NEXT: addi a1, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a1
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: and a0, a0, a1
+; RV64ZVFH-NEXT: fsrmi a1, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v32f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB26_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI26_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; RV32ZVFHMIN-NEXT: addi a1, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v32f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v6, v0
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: vslidedown.vi v7, v0, 2
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB26_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB26_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a1, 1075
+; RV64ZVFHMIN-NEXT: slli a1, a1, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a1
+; RV64ZVFHMIN-NEXT: addi a1, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a1
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: and a0, a0, a1
+; RV64ZVFHMIN-NEXT: fsrmi a1, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
}
define <32 x double> @vp_roundtozero_v32f64_unmasked(<32 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_v32f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB27_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: lui a2, %hi(.LCPI27_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
-; CHECK-NEXT: addi a2, a0, -16
-; CHECK-NEXT: sltu a0, a0, a2
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: fsrmi a2, 1
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v7, v24, fa5
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_v32f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: li a2, 16
+; RV32ZVFH-NEXT: mv a1, a0
+; RV32ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: li a1, 16
+; RV32ZVFH-NEXT: .LBB27_2:
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFH-NEXT: addi a2, a0, -16
+; RV32ZVFH-NEXT: sltu a0, a0, a2
+; RV32ZVFH-NEXT: addi a0, a0, -1
+; RV32ZVFH-NEXT: and a0, a0, a2
+; RV32ZVFH-NEXT: fsrmi a2, 1
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsrmi a1, 1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a1
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_v32f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: li a2, 16
+; RV64ZVFH-NEXT: mv a1, a0
+; RV64ZVFH-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: li a1, 16
+; RV64ZVFH-NEXT: .LBB27_2:
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: addi a2, a0, -16
+; RV64ZVFH-NEXT: sltu a0, a0, a2
+; RV64ZVFH-NEXT: addi a0, a0, -1
+; RV64ZVFH-NEXT: and a0, a0, a2
+; RV64ZVFH-NEXT: fsrmi a2, 1
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFH-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsrmi a1, 1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a1
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: li a2, 16
+; RV32ZVFHMIN-NEXT: mv a1, a0
+; RV32ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: li a1, 16
+; RV32ZVFHMIN-NEXT: .LBB27_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI27_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI27_0)(a2)
+; RV32ZVFHMIN-NEXT: addi a2, a0, -16
+; RV32ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV32ZVFHMIN-NEXT: addi a0, a0, -1
+; RV32ZVFHMIN-NEXT: and a0, a0, a2
+; RV32ZVFHMIN-NEXT: fsrmi a2, 1
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV32ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a1, 1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a1
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_v32f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: li a2, 16
+; RV64ZVFHMIN-NEXT: mv a1, a0
+; RV64ZVFHMIN-NEXT: bltu a0, a2, .LBB27_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: li a1, 16
+; RV64ZVFHMIN-NEXT: .LBB27_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: addi a2, a0, -16
+; RV64ZVFHMIN-NEXT: sltu a0, a0, a2
+; RV64ZVFHMIN-NEXT: addi a0, a0, -1
+; RV64ZVFHMIN-NEXT: and a0, a0, a2
+; RV64ZVFHMIN-NEXT: fsrmi a2, 1
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5
+; RV64ZVFHMIN-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a1, 1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a1
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double> %va, <32 x i1> splat (i1 true), i32 %evl)
ret <32 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll
index af79ace04cf54..965d0b0fe0f9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-fp.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zvfbfmin,+v -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <4 x bfloat> @shuffle_v4bf16(<4 x bfloat> %x, <4 x bfloat> %y) {
; CHECK-LABEL: shuffle_v4bf16:
@@ -39,29 +39,49 @@ define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) {
}
define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
-; CHECK-LABEL: shuffle_fv_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI3_0)(a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 9
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: shuffle_fv_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI3_0)(a0)
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 9
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_fv_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 9
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: slli a0, a0, 62
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
ret <4 x double> %s
}
define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) {
-; CHECK-LABEL: shuffle_vf_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI4_0)(a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 6
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: shuffle_vf_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI4_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI4_0)(a0)
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 6
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: shuffle_vf_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 6
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: slli a0, a0, 62
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
ret <4 x double> %s
}
@@ -79,15 +99,25 @@ define <4 x float> @vfmerge_constant_v4f32(<4 x float> %x) {
}
define <4 x double> @vfmerge_constant_v4f64(<4 x double> %x) {
-; CHECK-LABEL: vfmerge_constant_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI6_0)(a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 6
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vfmerge_constant_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a0, %hi(.LCPI6_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI6_0)(a0)
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 6
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vfmerge_constant_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 6
+; RV64-NEXT: lui a0, 4101
+; RV64-NEXT: slli a0, a0, 38
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
+; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> <double poison, double 5.0, double poison, double poison>, <4 x i32> <i32 0, i32 5, i32 5, i32 3>
ret <4 x double> %s
}
@@ -161,40 +191,71 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
}
define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
-; CHECK-LABEL: vrgather_shuffle_xv_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vmv2r.v v10, v8
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vslideup.vi v10, v8, 2, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 12
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfmv.v.f v8, fa5
-; CHECK-NEXT: vmerge.vvm v8, v8, v10, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vrgather_shuffle_xv_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vmv2r.v v10, v8
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vslideup.vi v10, v8, 2, v0.t
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 12
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vfmv.v.f v8, fa5
+; RV32-NEXT: vmerge.vvm v8, v8, v10, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrgather_shuffle_xv_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 8
+; RV64-NEXT: vmv2r.v v10, v8
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vslideup.vi v10, v8, 2, v0.t
+; RV64-NEXT: slli a0, a0, 62
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 12
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v8, a0
+; RV64-NEXT: vmerge.vvm v8, v8, v10, v0
+; RV64-NEXT: ret
%s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
ret <4 x double> %s
}
define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
-; CHECK-LABEL: vrgather_shuffle_vx_v4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; CHECK-NEXT: vslidedown.vi v8, v8, 2, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 3
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vfmv.v.f v10, fa5
-; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
-; CHECK-NEXT: ret
+; RV32-LABEL: vrgather_shuffle_vx_v4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 2
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vslidedown.vi v8, v8, 2, v0.t
+; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT: vmv.v.i v0, 3
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vfmv.v.f v10, fa5
+; RV32-NEXT: vmerge.vvm v8, v10, v8, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vrgather_shuffle_vx_v4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 2
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vslidedown.vi v8, v8, 2, v0.t
+; RV64-NEXT: slli a0, a0, 62
+; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT: vmv.v.i v0, 3
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.v.x v10, a0
+; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
+; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
ret <4 x double> %s
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index 6ebb03ff0297e..8f2aec3140e9d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <vscale x 1 x bfloat> @llvm.vp.floor.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -407,10 +407,11 @@ declare <vscale x 1 x half> @llvm.vp.floor.nxv1f16(<vscale x 1 x half>, <vscale
define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -453,10 +454,11 @@ define <vscale x 1 x half> @vp_floor_nxv1f16(<vscale x 1 x half> %va, <vscale x
define <vscale x 1 x half> @vp_floor_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -493,10 +495,11 @@ declare <vscale x 2 x half> @llvm.vp.floor.nxv2f16(<vscale x 2 x half>, <vscale
define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -539,10 +542,11 @@ define <vscale x 2 x half> @vp_floor_nxv2f16(<vscale x 2 x half> %va, <vscale x
define <vscale x 2 x half> @vp_floor_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -579,10 +583,11 @@ declare <vscale x 4 x half> @llvm.vp.floor.nxv4f16(<vscale x 4 x half>, <vscale
define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -625,10 +630,11 @@ define <vscale x 4 x half> @vp_floor_nxv4f16(<vscale x 4 x half> %va, <vscale x
define <vscale x 4 x half> @vp_floor_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -667,9 +673,10 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -713,10 +720,11 @@ define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x
define <vscale x 8 x half> @vp_floor_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -755,9 +763,10 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -801,10 +810,11 @@ define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale
define <vscale x 16 x half> @vp_floor_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -843,9 +853,10 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
@@ -922,10 +933,11 @@ define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale
define <vscale x 32 x half> @vp_floor_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 2
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -1210,41 +1222,141 @@ define <vscale x 16 x float> @vp_floor_nxv16f32_unmasked(<vscale x 16 x float> %
declare <vscale x 1 x double> @llvm.vp.floor.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_floor_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.floor.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv1f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv1f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv1f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv1f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.floor.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1252,43 +1364,149 @@ define <vscale x 1 x double> @vp_floor_nxv1f64_unmasked(<vscale x 1 x double> %v
declare <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1296,43 +1514,149 @@ define <vscale x 2 x double> @vp_floor_nxv2f64_unmasked(<vscale x 2 x double> %v
declare <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1340,43 +1664,149 @@ define <vscale x 4 x double> @vp_floor_nxv4f64_unmasked(<vscale x 4 x double> %v
declare <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv7f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv7f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv7f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv7f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv7f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv7f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv7f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv7f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1384,43 +1814,149 @@ define <vscale x 7 x double> @vp_floor_nxv7f64_unmasked(<vscale x 7 x double> %v
declare <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1429,87 +1965,325 @@ define <vscale x 8 x double> @vp_floor_nxv8f64_unmasked(<vscale x 8 x double> %v
declare <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a2, 2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v7, v0
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT: srli a3, a1, 3
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFH-NEXT: sub a2, a0, a1
+; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFH-NEXT: sltu a3, a0, a2
+; RV32ZVFH-NEXT: addi a3, a3, -1
+; RV32ZVFH-NEXT: and a2, a3, a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a2, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB44_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v7, v0
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: srli a3, a1, 3
+; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a2, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB44_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT: srli a3, a1, 3
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFHMIN-NEXT: sub a2, a0, a1
+; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFHMIN-NEXT: sltu a3, a0, a2
+; RV32ZVFHMIN-NEXT: addi a3, a3, -1
+; RV32ZVFHMIN-NEXT: and a2, a3, a2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a2, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB44_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: srli a3, a1, 3
+; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a2, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB44_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_floor_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_floor_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a2, 2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_floor_nxv16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT: sub a3, a0, a1
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFH-NEXT: sltu a2, a0, a3
+; RV32ZVFH-NEXT: addi a2, a2, -1
+; RV32ZVFH-NEXT: and a2, a2, a3
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a2, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB45_2:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_floor_nxv16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a2, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB45_2:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_floor_nxv16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT: sub a3, a0, a1
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV32ZVFHMIN-NEXT: addi a2, a2, -1
+; RV32ZVFHMIN-NEXT: and a2, a2, a3
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a2, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB45_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_floor_nxv16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a2, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB45_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.floor.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
index 7a4695d1c25c1..409235f7e1b2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
declare <vscale x 1 x half> @llvm.experimental.constrained.nearbyint.nxv1f16(<vscale x 1 x half>, metadata, metadata)
@@ -11,10 +11,11 @@ define <vscale x 1 x half> @nearbyint_nxv1f16(<vscale x 1 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -35,10 +36,11 @@ define <vscale x 2 x half> @nearbyint_nxv2f16(<vscale x 2 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -59,10 +61,11 @@ define <vscale x 4 x half> @nearbyint_nxv4f16(<vscale x 4 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -83,10 +86,11 @@ define <vscale x 8 x half> @nearbyint_nxv8f16(<vscale x 8 x half> %v) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -107,10 +111,11 @@ define <vscale x 16 x half> @nearbyint_nxv16f16(<vscale x 16 x half> %v) strictf
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -131,10 +136,11 @@ define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %v) strictf
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: frflags a0
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
@@ -271,23 +277,42 @@ define <vscale x 16 x float> @nearbyint_nxv16f32(<vscale x 16 x float> %v) stric
declare <vscale x 1 x double> @llvm.experimental.constrained.nearbyint.nxv1f64(<vscale x 1 x double>, metadata, metadata)
define <vscale x 1 x double> @nearbyint_nxv1f64(<vscale x 1 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <vscale x 1 x double> @llvm.experimental.constrained.nearbyint.nxv1f64(<vscale x 1 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <vscale x 1 x double> %r
}
@@ -295,23 +320,42 @@ define <vscale x 1 x double> @nearbyint_nxv1f64(<vscale x 1 x double> %v) strict
declare <vscale x 2 x double> @llvm.experimental.constrained.nearbyint.nxv2f64(<vscale x 2 x double>, metadata, metadata)
define <vscale x 2 x double> @nearbyint_nxv2f64(<vscale x 2 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <vscale x 2 x double> @llvm.experimental.constrained.nearbyint.nxv2f64(<vscale x 2 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <vscale x 2 x double> %r
}
@@ -319,23 +363,42 @@ define <vscale x 2 x double> @nearbyint_nxv2f64(<vscale x 2 x double> %v) strict
declare <vscale x 4 x double> @llvm.experimental.constrained.nearbyint.nxv4f64(<vscale x 4 x double>, metadata, metadata)
define <vscale x 4 x double> @nearbyint_nxv4f64(<vscale x 4 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <vscale x 4 x double> @llvm.experimental.constrained.nearbyint.nxv4f64(<vscale x 4 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <vscale x 4 x double> %r
}
@@ -343,23 +406,42 @@ define <vscale x 4 x double> @nearbyint_nxv4f64(<vscale x 4 x double> %v) strict
declare <vscale x 8 x double> @llvm.experimental.constrained.nearbyint.nxv8f64(<vscale x 8 x double>, metadata, metadata)
define <vscale x 8 x double> @nearbyint_nxv8f64(<vscale x 8 x double> %v) strictfp {
-; CHECK-LABEL: nearbyint_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32-LABEL: nearbyint_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: frflags a0
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: fsflags a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: nearbyint_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: frflags a0
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: fsflags a0
+; RV64-NEXT: ret
%r = call <vscale x 8 x double> @llvm.experimental.constrained.nearbyint.nxv8f64(<vscale x 8 x double> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
ret <vscale x 8 x double> %r
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
index 4ea3269cec0b1..97e65f4e4b53a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
define <vscale x 1 x bfloat> @nearbyint_nxv1bf16(<vscale x 1 x bfloat> %x) {
; CHECK-LABEL: nearbyint_nxv1bf16:
@@ -167,10 +167,11 @@ define <vscale x 32 x bfloat> @nearbyint_nxv32bf16(<vscale x 32 x bfloat> %x) {
define <vscale x 1 x half> @nearbyint_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: nearbyint_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -206,10 +207,11 @@ declare <vscale x 1 x half> @llvm.nearbyint.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @nearbyint_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: nearbyint_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -245,10 +247,11 @@ declare <vscale x 2 x half> @llvm.nearbyint.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @nearbyint_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: nearbyint_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -284,10 +287,11 @@ declare <vscale x 4 x half> @llvm.nearbyint.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @nearbyint_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: nearbyint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -323,10 +327,11 @@ declare <vscale x 8 x half> @llvm.nearbyint.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @nearbyint_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: nearbyint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -362,10 +367,11 @@ declare <vscale x 16 x half> @llvm.nearbyint.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: nearbyint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -513,80 +519,268 @@ define <vscale x 16 x float> @nearbyint_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.nearbyint.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @nearbyint_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: nearbyint_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: nearbyint_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: nearbyint_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: nearbyint_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: nearbyint_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.nearbyint.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.nearbyint.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @nearbyint_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: nearbyint_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: nearbyint_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: nearbyint_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: nearbyint_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: nearbyint_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.nearbyint.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @nearbyint_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: nearbyint_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: nearbyint_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: nearbyint_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: nearbyint_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: nearbyint_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.nearbyint.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.nearbyint.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @nearbyint_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: nearbyint_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: nearbyint_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: nearbyint_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: nearbyint_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: nearbyint_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.nearbyint.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll
index 5fe59f3b3933d..5ed921d39590d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/frint-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
define <vscale x 1 x bfloat> @rint_nxv1bf16(<vscale x 1 x bfloat> %x) {
; CHECK-LABEL: rint_nxv1bf16:
@@ -153,10 +153,11 @@ define <vscale x 32 x bfloat> @rint_nxv32bf16(<vscale x 32 x bfloat> %x) {
define <vscale x 1 x half> @rint_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: rint_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -188,10 +189,11 @@ declare <vscale x 1 x half> @llvm.rint.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @rint_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: rint_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -223,10 +225,11 @@ declare <vscale x 2 x half> @llvm.rint.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @rint_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: rint_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -258,10 +261,11 @@ declare <vscale x 4 x half> @llvm.rint.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @rint_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: rint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -293,10 +297,11 @@ declare <vscale x 8 x half> @llvm.rint.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @rint_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: rint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -328,10 +333,11 @@ declare <vscale x 16 x half> @llvm.rint.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @rint_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: rint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -463,72 +469,236 @@ define <vscale x 16 x float> @rint_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.rint.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @rint_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: rint_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: rint_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: rint_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: rint_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: rint_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @rint_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: rint_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: rint_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: rint_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: rint_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: rint_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.rint.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @rint_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: rint_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: rint_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: rint_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: rint_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: rint_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @rint_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: rint_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: rint_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: rint_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: rint_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: rint_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.rint.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll
index 3d992aa13e379..295c264e7d924 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-constrained-sdnode.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector type.
@@ -11,10 +11,11 @@ define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -34,10 +35,11 @@ define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -57,10 +59,11 @@ define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -80,10 +83,11 @@ define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -103,10 +107,11 @@ define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -126,10 +131,11 @@ define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 4
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
@@ -260,92 +266,168 @@ define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) strictfp
declare <vscale x 16 x float> @llvm.experimental.constrained.round.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) strictfp {
-; CHECK-LABEL: round_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double>, metadata)
define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) strictfp {
-; CHECK-LABEL: round_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double>, metadata)
define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) strictfp {
-; CHECK-LABEL: round_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double>, metadata)
define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) strictfp {
-; CHECK-LABEL: round_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: round_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: fsrmi a0, 4
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: round_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: fsrmi a0, 4
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 8 x double> @llvm.experimental.constrained.round.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
index f7422b279149f..d420636a573fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
; This file tests the code generation for `llvm.round.*` on scalable vector type.
@@ -169,10 +169,11 @@ define <vscale x 32 x bfloat> @round_nxv32bf16(<vscale x 32 x bfloat> %x) {
define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: round_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -208,10 +209,11 @@ declare <vscale x 1 x half> @llvm.round.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: round_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -247,10 +249,11 @@ declare <vscale x 2 x half> @llvm.round.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: round_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -286,10 +289,11 @@ declare <vscale x 4 x half> @llvm.round.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: round_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -325,10 +329,11 @@ declare <vscale x 8 x half> @llvm.round.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: round_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -364,10 +369,11 @@ declare <vscale x 16 x half> @llvm.round.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: round_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -515,80 +521,268 @@ define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.round.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: round_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.round.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: round_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.round.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: round_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.round.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: round_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: round_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: round_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: round_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: round_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.round.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
index c293ac91b63bf..de766895c734f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on scalable vector types.
@@ -11,10 +11,11 @@ define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -34,10 +35,11 @@ define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -57,10 +59,11 @@ define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -80,10 +83,11 @@ define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
@@ -103,10 +107,11 @@ define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) strictf
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
@@ -126,10 +131,11 @@ define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) strictf
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: fsrmi a0, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
@@ -260,92 +266,168 @@ define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) stric
declare <vscale x 16 x float> @llvm.experimental.constrained.roundeven.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 1 x double> @llvm.experimental.constrained.roundeven.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.roundeven.nxv1f64(<vscale x 1 x double>, metadata)
define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 2 x double> @llvm.experimental.constrained.roundeven.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.roundeven.nxv2f64(<vscale x 2 x double>, metadata)
define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 4 x double> @llvm.experimental.constrained.roundeven.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.roundeven.nxv4f64(<vscale x 4 x double>, metadata)
define <vscale x 8 x double> @roundeven_nxv8f64(<vscale x 8 x double> %x) strictfp {
-; CHECK-LABEL: roundeven_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: roundeven_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: fsrmi a0, 0
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32-NEXT: fsrm a0
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: roundeven_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: fsrmi a0, 0
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64-NEXT: fsrm a0
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 8 x double> @llvm.experimental.constrained.roundeven.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
index 865531b77eb29..b9121c55684ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
; This file tests the code generation for `llvm.roundeven.*` on scalable vector types.
define <vscale x 1 x bfloat> @roundeven_nxv1bf16(<vscale x 1 x bfloat> %x) {
@@ -168,10 +168,11 @@ define <vscale x 32 x bfloat> @roundeven_nxv32bf16(<vscale x 32 x bfloat> %x) {
define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: roundeven_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -207,10 +208,11 @@ declare <vscale x 1 x half> @llvm.roundeven.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: roundeven_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -246,10 +248,11 @@ declare <vscale x 2 x half> @llvm.roundeven.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: roundeven_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -285,10 +288,11 @@ declare <vscale x 4 x half> @llvm.roundeven.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: roundeven_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -324,10 +328,11 @@ declare <vscale x 8 x half> @llvm.roundeven.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: roundeven_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -363,10 +368,11 @@ declare <vscale x 16 x half> @llvm.roundeven.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: roundeven_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -514,80 +520,268 @@ define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.roundeven.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: roundeven_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.roundeven.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.roundeven.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: roundeven_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.roundeven.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: roundeven_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.roundeven.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.roundeven.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @roundeven_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: roundeven_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: roundeven_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: roundeven_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: roundeven_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: roundeven_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.roundeven.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
index 8a5f118d8f6ac..63cb72e8795e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
@@ -1,18 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) strictfp {
; CHECK-LABEL: trunc_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI0_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -30,10 +31,11 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI1_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -51,10 +53,11 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI2_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
@@ -72,10 +75,11 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI3_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v10, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v10, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
@@ -93,10 +97,11 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI4_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v12, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v12, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
@@ -114,10 +119,11 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI5_0)(a0)
+; CHECK-NEXT: li a0, 25
; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
+; CHECK-NEXT: slli a0, a0, 10
; CHECK-NEXT: vfabs.v v16, v8
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v16, fa5
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
@@ -236,84 +242,152 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp
declare <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float>, metadata)
define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
-; CHECK-LABEL: trunc_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_nxv1f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI11_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI11_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v9, v8
+; RV32-NEXT: vmflt.vf v0, v9, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv1f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v9, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v9, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double>, metadata)
define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
-; CHECK-LABEL: trunc_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_nxv2f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI12_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI12_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v10, v8
+; RV32-NEXT: vmflt.vf v0, v10, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv2f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v10, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v10, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double>, metadata)
define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
-; CHECK-LABEL: trunc_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_nxv4f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI13_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI13_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v12, v8
+; RV32-NEXT: vmflt.vf v0, v12, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv4f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v12, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v12, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double>, metadata)
define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
-; CHECK-LABEL: trunc_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v8
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
-; CHECK-NEXT: vfadd.vv v8, v8, v8, v0.t
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: trunc_nxv8f64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV32-NEXT: vmfne.vv v0, v8, v8
+; RV32-NEXT: lui a0, %hi(.LCPI14_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI14_0)(a0)
+; RV32-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV32-NEXT: vfabs.v v16, v8
+; RV32-NEXT: vmflt.vf v0, v16, fa5
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; RV32-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: trunc_nxv8f64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; RV64-NEXT: vmfne.vv v0, v8, v8
+; RV64-NEXT: li a0, 1075
+; RV64-NEXT: vfadd.vv v8, v8, v8, v0.t
+; RV64-NEXT: slli a0, a0, 52
+; RV64-NEXT: vfabs.v v16, v8
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vmflt.vf v0, v16, fa5
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; RV64-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64-NEXT: ret
%a = call <vscale x 8 x double> @llvm.experimental.constrained.trunc.nxv8f64(<vscale x 8 x double> %x, metadata !"fpexcept.strict")
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
index d597e166be4ee..34b3e8d2849b7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
define <vscale x 1 x bfloat> @trunc_nxv1bf16(<vscale x 1 x bfloat> %x) {
; CHECK-LABEL: trunc_nxv1bf16:
@@ -153,10 +153,11 @@ define <vscale x 32 x bfloat> @trunc_nxv32bf16(<vscale x 32 x bfloat> %x) {
define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
; ZVFH-LABEL: trunc_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI6_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -188,10 +189,11 @@ declare <vscale x 1 x half> @llvm.trunc.nxv1f16(<vscale x 1 x half>)
define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
; ZVFH-LABEL: trunc_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI7_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -223,10 +225,11 @@ declare <vscale x 2 x half> @llvm.trunc.nxv2f16(<vscale x 2 x half>)
define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
; ZVFH-LABEL: trunc_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI8_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -258,10 +261,11 @@ declare <vscale x 4 x half> @llvm.trunc.nxv4f16(<vscale x 4 x half>)
define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
; ZVFH-LABEL: trunc_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI9_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI9_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -293,10 +297,11 @@ declare <vscale x 8 x half> @llvm.trunc.nxv8f16(<vscale x 8 x half>)
define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
; ZVFH-LABEL: trunc_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI10_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -328,10 +333,11 @@ declare <vscale x 16 x half> @llvm.trunc.nxv16f16(<vscale x 16 x half>)
define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
; ZVFH-LABEL: trunc_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a0, %hi(.LCPI11_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI11_0)(a0)
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -463,72 +469,236 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
declare <vscale x 16 x float> @llvm.trunc.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
-; CHECK-LABEL: trunc_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI17_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: trunc_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: trunc_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: trunc_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI17_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI17_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: trunc_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
-; CHECK-LABEL: trunc_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI18_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: trunc_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: trunc_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: trunc_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI18_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI18_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: trunc_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.trunc.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
-; CHECK-LABEL: trunc_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: trunc_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: trunc_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: trunc_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI19_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI19_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: trunc_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) {
-; CHECK-LABEL: trunc_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI20_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: trunc_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: trunc_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: trunc_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI20_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI20_0)(a0)
+; RV32ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: trunc_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%a = call <vscale x 8 x double> @llvm.trunc.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x double> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
index ae0542fb5b74f..d7bf566b9b5f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/half-round-conv.ll
@@ -83,10 +83,11 @@ define <vscale x 1 x i32> @trunc_nxv1f16_to_ui32(<vscale x 1 x half> %x) {
define <vscale x 1 x i64> @trunc_nxv1f16_to_si64(<vscale x 1 x half> %x) {
; CHECK-LABEL: trunc_nxv1f16_to_si64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -104,10 +105,11 @@ define <vscale x 1 x i64> @trunc_nxv1f16_to_si64(<vscale x 1 x half> %x) {
define <vscale x 1 x i64> @trunc_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
; CHECK-LABEL: trunc_nxv1f16_to_ui64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI7_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -201,10 +203,11 @@ define <vscale x 4 x i32> @trunc_nxv4f16_to_ui32(<vscale x 4 x half> %x) {
define <vscale x 4 x i64> @trunc_nxv4f16_to_si64(<vscale x 4 x half> %x) {
; CHECK-LABEL: trunc_nxv4f16_to_si64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI14_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -222,10 +225,11 @@ define <vscale x 4 x i64> @trunc_nxv4f16_to_si64(<vscale x 4 x half> %x) {
define <vscale x 4 x i64> @trunc_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
; CHECK-LABEL: trunc_nxv4f16_to_ui64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI15_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI15_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -331,10 +335,11 @@ define <vscale x 1 x i32> @ceil_nxv1f16_to_ui32(<vscale x 1 x half> %x) {
define <vscale x 1 x i64> @ceil_nxv1f16_to_si64(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_si64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI22_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -388,10 +393,11 @@ define <vscale x 1 x i64> @ceil_nxv1f16_to_si64(<vscale x 1 x half> %x) {
define <vscale x 1 x i64> @ceil_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
; CHECK-LABEL: ceil_nxv1f16_to_ui64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI23_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI23_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -533,10 +539,11 @@ define <vscale x 4 x i32> @ceil_nxv4f16_to_ui32(<vscale x 4 x half> %x) {
define <vscale x 4 x i64> @ceil_nxv4f16_to_si64(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_si64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI30_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI30_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -590,10 +597,11 @@ define <vscale x 4 x i64> @ceil_nxv4f16_to_si64(<vscale x 4 x half> %x) {
define <vscale x 4 x i64> @ceil_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
; CHECK-LABEL: ceil_nxv4f16_to_ui64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI31_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI31_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: fsrmi a0, 3
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -723,10 +731,11 @@ define <vscale x 1 x i32> @rint_nxv1f16_to_ui32(<vscale x 1 x half> %x) {
define <vscale x 1 x i64> @rint_nxv1f16_to_si64(<vscale x 1 x half> %x) {
; CHECK-LABEL: rint_nxv1f16_to_si64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI38_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -778,10 +787,11 @@ define <vscale x 1 x i64> @rint_nxv1f16_to_si64(<vscale x 1 x half> %x) {
define <vscale x 1 x i64> @rint_nxv1f16_to_ui64(<vscale x 1 x half> %x) {
; CHECK-LABEL: rint_nxv1f16_to_ui64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI39_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI39_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -909,10 +919,11 @@ define <vscale x 4 x i32> @rint_nxv4f16_to_ui32(<vscale x 4 x half> %x) {
define <vscale x 4 x i64> @rint_nxv4f16_to_si64(<vscale x 4 x half> %x) {
; CHECK-LABEL: rint_nxv4f16_to_si64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI46_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI46_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -964,10 +975,11 @@ define <vscale x 4 x i64> @rint_nxv4f16_to_si64(<vscale x 4 x half> %x) {
define <vscale x 4 x i64> @rint_nxv4f16_to_ui64(<vscale x 4 x half> %x) {
; CHECK-LABEL: rint_nxv4f16_to_ui64:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI47_0)
-; CHECK-NEXT: flh fa5, %lo(.LCPI47_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfabs.v v9, v8
+; CHECK-NEXT: li a0, 25
+; CHECK-NEXT: slli a0, a0, 10
+; CHECK-NEXT: fmv.h.x fa5, a0
; CHECK-NEXT: vmflt.vf v0, v9, fa5
; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index 9bb5717d6fc25..64e305f130dd7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <vscale x 1 x bfloat> @llvm.vp.nearbyint.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -407,10 +407,11 @@ declare <vscale x 1 x half> @llvm.vp.nearbyint.nxv1f16(<vscale x 1 x half>, <vsc
define <vscale x 1 x half> @vp_nearbyint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: frflags a0
@@ -453,10 +454,11 @@ define <vscale x 1 x half> @vp_nearbyint_nxv1f16(<vscale x 1 x half> %va, <vscal
define <vscale x 1 x half> @vp_nearbyint_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -493,10 +495,11 @@ declare <vscale x 2 x half> @llvm.vp.nearbyint.nxv2f16(<vscale x 2 x half>, <vsc
define <vscale x 2 x half> @vp_nearbyint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: frflags a0
@@ -539,10 +542,11 @@ define <vscale x 2 x half> @vp_nearbyint_nxv2f16(<vscale x 2 x half> %va, <vscal
define <vscale x 2 x half> @vp_nearbyint_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -579,10 +583,11 @@ declare <vscale x 4 x half> @llvm.vp.nearbyint.nxv4f16(<vscale x 4 x half>, <vsc
define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: frflags a0
@@ -625,10 +630,11 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
define <vscale x 4 x half> @vp_nearbyint_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -667,9 +673,10 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: frflags a0
@@ -713,10 +720,11 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
define <vscale x 8 x half> @vp_nearbyint_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -755,9 +763,10 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: frflags a0
@@ -801,10 +810,11 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
define <vscale x 16 x half> @vp_nearbyint_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -843,9 +853,10 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: frflags a0
@@ -922,10 +933,11 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: frflags a0
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -1210,41 +1222,141 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32_unmasked(<vscale x 16 x floa
declare <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_nearbyint_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv1f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv1f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv1f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv1f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.nearbyint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1252,43 +1364,149 @@ define <vscale x 1 x double> @vp_nearbyint_nxv1f64_unmasked(<vscale x 1 x double
declare <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1296,43 +1514,149 @@ define <vscale x 2 x double> @vp_nearbyint_nxv2f64_unmasked(<vscale x 2 x double
declare <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1340,43 +1664,149 @@ define <vscale x 4 x double> @vp_nearbyint_nxv4f64_unmasked(<vscale x 4 x double
declare <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv7f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv7f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv7f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv7f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv7f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv7f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv7f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1384,43 +1814,149 @@ define <vscale x 7 x double> @vp_nearbyint_nxv7f64_unmasked(<vscale x 7 x double
declare <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1429,87 +1965,325 @@ define <vscale x 8 x double> @vp_nearbyint_nxv8f64_unmasked(<vscale x 8 x double
declare <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: frflags a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsflags a2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v7, v0
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT: srli a3, a1, 3
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFH-NEXT: sub a2, a0, a1
+; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFH-NEXT: sltu a3, a0, a2
+; RV32ZVFH-NEXT: addi a3, a3, -1
+; RV32ZVFH-NEXT: and a2, a3, a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsflags a2
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB44_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v7, v0
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: srli a3, a1, 3
+; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a2
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsflags a2
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB44_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT: srli a3, a1, 3
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFHMIN-NEXT: sub a2, a0, a1
+; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFHMIN-NEXT: sltu a3, a0, a2
+; RV32ZVFHMIN-NEXT: addi a3, a3, -1
+; RV32ZVFHMIN-NEXT: and a2, a3, a2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a2
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB44_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: srli a3, a1, 3
+; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a2
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a2
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB44_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_nearbyint_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_nearbyint_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: frflags a2
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: fsflags a2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_nearbyint_nxv16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT: sub a3, a0, a1
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFH-NEXT: sltu a2, a0, a3
+; RV32ZVFH-NEXT: addi a2, a2, -1
+; RV32ZVFH-NEXT: and a2, a2, a3
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: frflags a2
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: fsflags a2
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB45_2:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: frflags a0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: fsflags a0
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_nearbyint_nxv16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: frflags a2
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: fsflags a2
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB45_2:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: frflags a0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: fsflags a0
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_nearbyint_nxv16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT: sub a3, a0, a1
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV32ZVFHMIN-NEXT: addi a2, a2, -1
+; RV32ZVFHMIN-NEXT: and a2, a2, a3
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: frflags a2
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a2
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB45_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: frflags a0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsflags a0
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_nearbyint_nxv16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: frflags a2
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a2
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB45_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: frflags a0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsflags a0
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index a9505dca97529..091caa6c65fd2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <vscale x 1 x bfloat> @llvm.vp.rint.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -379,10 +379,11 @@ declare <vscale x 1 x half> @llvm.vp.rint.nxv1f16(<vscale x 1 x half>, <vscale x
define <vscale x 1 x half> @vp_rint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
@@ -421,10 +422,11 @@ define <vscale x 1 x half> @vp_rint_nxv1f16(<vscale x 1 x half> %va, <vscale x 1
define <vscale x 1 x half> @vp_rint_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -457,10 +459,11 @@ declare <vscale x 2 x half> @llvm.vp.rint.nxv2f16(<vscale x 2 x half>, <vscale x
define <vscale x 2 x half> @vp_rint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
@@ -499,10 +502,11 @@ define <vscale x 2 x half> @vp_rint_nxv2f16(<vscale x 2 x half> %va, <vscale x 2
define <vscale x 2 x half> @vp_rint_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -535,10 +539,11 @@ declare <vscale x 4 x half> @llvm.vp.rint.nxv4f16(<vscale x 4 x half>, <vscale x
define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
@@ -577,10 +582,11 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
define <vscale x 4 x half> @vp_rint_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
@@ -615,9 +621,10 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: vmv1r.v v0, v10
@@ -657,10 +664,11 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
define <vscale x 8 x half> @vp_rint_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
@@ -695,9 +703,10 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: vmv1r.v v0, v12
@@ -737,10 +746,11 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
define <vscale x 16 x half> @vp_rint_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -775,9 +785,10 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: vmv1r.v v0, v16
@@ -848,10 +859,11 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -1110,37 +1122,125 @@ define <vscale x 16 x float> @vp_rint_nxv16f32_unmasked(<vscale x 16 x float> %v
declare <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_rint_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv1f64:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv1f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv1f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv1f64_unmasked:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv1f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.rint.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1148,39 +1248,133 @@ define <vscale x 1 x double> @vp_rint_nxv1f64_unmasked(<vscale x 1 x double> %va
declare <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv2f64:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv2f64_unmasked:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1188,39 +1382,133 @@ define <vscale x 2 x double> @vp_rint_nxv2f64_unmasked(<vscale x 2 x double> %va
declare <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv4f64:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv4f64_unmasked:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1228,39 +1516,133 @@ define <vscale x 4 x double> @vp_rint_nxv4f64_unmasked(<vscale x 4 x double> %va
declare <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv7f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv7f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv7f64:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv7f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv7f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv7f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv7f64_unmasked:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv7f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1268,39 +1650,133 @@ define <vscale x 7 x double> @vp_rint_nxv7f64_unmasked(<vscale x 7 x double> %va
declare <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv8f64:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFMIN-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv8f64_unmasked:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1309,79 +1785,293 @@ define <vscale x 8 x double> @vp_rint_nxv8f64_unmasked(<vscale x 8 x double> %va
declare <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v7, v0
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT: srli a3, a1, 3
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFH-NEXT: sub a2, a0, a1
+; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFH-NEXT: sltu a3, a0, a2
+; RV32ZVFH-NEXT: addi a3, a3, -1
+; RV32ZVFH-NEXT: and a2, a3, a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB44_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v7, v0
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: srli a3, a1, 3
+; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB44_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv16f64:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFMIN-NEXT: vmv1r.v v7, v0
+; RV32ZVFMIN-NEXT: csrr a1, vlenb
+; RV32ZVFMIN-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFMIN-NEXT: srli a3, a1, 3
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFMIN-NEXT: sub a2, a0, a1
+; RV32ZVFMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFMIN-NEXT: sltu a3, a0, a2
+; RV32ZVFMIN-NEXT: addi a3, a3, -1
+; RV32ZVFMIN-NEXT: and a2, a3, a2
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFMIN-NEXT: # %bb.1:
+; RV32ZVFMIN-NEXT: mv a0, a1
+; RV32ZVFMIN-NEXT: .LBB44_2:
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: srli a3, a1, 3
+; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB44_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_rint_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_rint_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_rint_nxv16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT: sub a3, a0, a1
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFH-NEXT: sltu a2, a0, a3
+; RV32ZVFH-NEXT: addi a2, a2, -1
+; RV32ZVFH-NEXT: and a2, a2, a3
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB45_2:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_rint_nxv16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB45_2:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFMIN-LABEL: vp_rint_nxv16f64_unmasked:
+; RV32ZVFMIN: # %bb.0:
+; RV32ZVFMIN-NEXT: csrr a1, vlenb
+; RV32ZVFMIN-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFMIN-NEXT: sub a3, a0, a1
+; RV32ZVFMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFMIN-NEXT: sltu a2, a0, a3
+; RV32ZVFMIN-NEXT: addi a2, a2, -1
+; RV32ZVFMIN-NEXT: and a2, a2, a3
+; RV32ZVFMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFMIN-NEXT: # %bb.1:
+; RV32ZVFMIN-NEXT: mv a0, a1
+; RV32ZVFMIN-NEXT: .LBB45_2:
+; RV32ZVFMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_rint_nxv16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB45_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.rint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index ccbc0ebb3b73e..d1ea5aa76268a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <vscale x 1 x bfloat> @llvm.vp.round.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -407,10 +407,11 @@ declare <vscale x 1 x half> @llvm.vp.round.nxv1f16(<vscale x 1 x half>, <vscale
define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -453,10 +454,11 @@ define <vscale x 1 x half> @vp_round_nxv1f16(<vscale x 1 x half> %va, <vscale x
define <vscale x 1 x half> @vp_round_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -493,10 +495,11 @@ declare <vscale x 2 x half> @llvm.vp.round.nxv2f16(<vscale x 2 x half>, <vscale
define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -539,10 +542,11 @@ define <vscale x 2 x half> @vp_round_nxv2f16(<vscale x 2 x half> %va, <vscale x
define <vscale x 2 x half> @vp_round_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -579,10 +583,11 @@ declare <vscale x 4 x half> @llvm.vp.round.nxv4f16(<vscale x 4 x half>, <vscale
define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -625,10 +630,11 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
define <vscale x 4 x half> @vp_round_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -667,9 +673,10 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -713,10 +720,11 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
define <vscale x 8 x half> @vp_round_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -755,9 +763,10 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -801,10 +810,11 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
define <vscale x 16 x half> @vp_round_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -843,9 +853,10 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
@@ -922,10 +933,11 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 4
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -1210,41 +1222,141 @@ define <vscale x 16 x float> @vp_round_nxv16f32_unmasked(<vscale x 16 x float> %
declare <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_round_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv1f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv1f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv1f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv1f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.round.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1252,43 +1364,149 @@ define <vscale x 1 x double> @vp_round_nxv1f64_unmasked(<vscale x 1 x double> %v
declare <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1296,43 +1514,149 @@ define <vscale x 2 x double> @vp_round_nxv2f64_unmasked(<vscale x 2 x double> %v
declare <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1340,43 +1664,149 @@ define <vscale x 4 x double> @vp_round_nxv4f64_unmasked(<vscale x 4 x double> %v
declare <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv7f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv7f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv7f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv7f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv7f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv7f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv7f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv7f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1384,43 +1814,149 @@ define <vscale x 7 x double> @vp_round_nxv7f64_unmasked(<vscale x 7 x double> %v
declare <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1429,87 +1965,325 @@ define <vscale x 8 x double> @vp_round_nxv8f64_unmasked(<vscale x 8 x double> %v
declare <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a2, 4
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v7, v0
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT: srli a3, a1, 3
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFH-NEXT: sub a2, a0, a1
+; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFH-NEXT: sltu a3, a0, a2
+; RV32ZVFH-NEXT: addi a3, a3, -1
+; RV32ZVFH-NEXT: and a2, a3, a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a2, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB44_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v7, v0
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: srli a3, a1, 3
+; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a2, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB44_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT: srli a3, a1, 3
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFHMIN-NEXT: sub a2, a0, a1
+; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFHMIN-NEXT: sltu a3, a0, a2
+; RV32ZVFHMIN-NEXT: addi a3, a3, -1
+; RV32ZVFHMIN-NEXT: and a2, a3, a2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a2, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB44_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: srli a3, a1, 3
+; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a2, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB44_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_round_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_round_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a2, 4
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_round_nxv16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT: sub a3, a0, a1
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFH-NEXT: sltu a2, a0, a3
+; RV32ZVFH-NEXT: addi a2, a2, -1
+; RV32ZVFH-NEXT: and a2, a2, a3
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a2, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB45_2:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 4
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_round_nxv16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a2, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB45_2:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 4
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_round_nxv16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT: sub a3, a0, a1
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV32ZVFHMIN-NEXT: addi a2, a2, -1
+; RV32ZVFHMIN-NEXT: and a2, a2, a3
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a2, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB45_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 4
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_round_nxv16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a2, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB45_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 4
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.round.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index 3975423e6f985..23d0e97c1c82b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <vscale x 1 x bfloat> @llvm.vp.roundeven.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -407,10 +407,11 @@ declare <vscale x 1 x half> @llvm.vp.roundeven.nxv1f16(<vscale x 1 x half>, <vsc
define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -453,10 +454,11 @@ define <vscale x 1 x half> @vp_roundeven_nxv1f16(<vscale x 1 x half> %va, <vscal
define <vscale x 1 x half> @vp_roundeven_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -493,10 +495,11 @@ declare <vscale x 2 x half> @llvm.vp.roundeven.nxv2f16(<vscale x 2 x half>, <vsc
define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -539,10 +542,11 @@ define <vscale x 2 x half> @vp_roundeven_nxv2f16(<vscale x 2 x half> %va, <vscal
define <vscale x 2 x half> @vp_roundeven_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -579,10 +583,11 @@ declare <vscale x 4 x half> @llvm.vp.roundeven.nxv4f16(<vscale x 4 x half>, <vsc
define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -625,10 +630,11 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
define <vscale x 4 x half> @vp_roundeven_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -667,9 +673,10 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -713,10 +720,11 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
define <vscale x 8 x half> @vp_roundeven_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -755,9 +763,10 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -801,10 +810,11 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
define <vscale x 16 x half> @vp_roundeven_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -843,9 +853,10 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
@@ -922,10 +933,11 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 0
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -1210,41 +1222,141 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32_unmasked(<vscale x 16 x floa
declare <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_roundeven_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv1f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv1f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv1f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv1f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.roundeven.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1252,43 +1364,149 @@ define <vscale x 1 x double> @vp_roundeven_nxv1f64_unmasked(<vscale x 1 x double
declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1296,43 +1514,149 @@ define <vscale x 2 x double> @vp_roundeven_nxv2f64_unmasked(<vscale x 2 x double
declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1340,43 +1664,149 @@ define <vscale x 4 x double> @vp_roundeven_nxv4f64_unmasked(<vscale x 4 x double
declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv7f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv7f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv7f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv7f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv7f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv7f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv7f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv7f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1384,43 +1814,149 @@ define <vscale x 7 x double> @vp_roundeven_nxv7f64_unmasked(<vscale x 7 x double
declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1429,87 +1965,325 @@ define <vscale x 8 x double> @vp_roundeven_nxv8f64_unmasked(<vscale x 8 x double
declare <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a2, 0
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v7, v0
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT: srli a3, a1, 3
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFH-NEXT: sub a2, a0, a1
+; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFH-NEXT: sltu a3, a0, a2
+; RV32ZVFH-NEXT: addi a3, a3, -1
+; RV32ZVFH-NEXT: and a2, a3, a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a2, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB44_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v7, v0
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: srli a3, a1, 3
+; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a2, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB44_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT: srli a3, a1, 3
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFHMIN-NEXT: sub a2, a0, a1
+; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFHMIN-NEXT: sltu a3, a0, a2
+; RV32ZVFHMIN-NEXT: addi a3, a3, -1
+; RV32ZVFHMIN-NEXT: and a2, a3, a2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a2, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB44_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: srli a3, a1, 3
+; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a2, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB44_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_roundeven_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundeven_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a2, 0
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundeven_nxv16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT: sub a3, a0, a1
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFH-NEXT: sltu a2, a0, a3
+; RV32ZVFH-NEXT: addi a2, a2, -1
+; RV32ZVFH-NEXT: and a2, a2, a3
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a2, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB45_2:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 0
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundeven_nxv16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a2, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB45_2:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 0
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundeven_nxv16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT: sub a3, a0, a1
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV32ZVFHMIN-NEXT: addi a2, a2, -1
+; RV32ZVFHMIN-NEXT: and a2, a2, a3
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a2, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB45_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 0
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundeven_nxv16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a2, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB45_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 0
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.roundeven.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 7f617f48862c4..4d8066d12c9ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -1,16 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV32ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFH
+; RUN: --check-prefixes=CHECK,ZVFH,RV64ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=ilp32d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV32ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
-; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: --check-prefixes=CHECK,ZVFHMIN,RV64ZVFHMIN
declare <vscale x 1 x bfloat> @llvm.vp.roundtozero.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
@@ -407,10 +407,11 @@ declare <vscale x 1 x half> @llvm.vp.roundtozero.nxv1f16(<vscale x 1 x half>, <v
define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI12_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI12_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -453,10 +454,11 @@ define <vscale x 1 x half> @vp_roundtozero_nxv1f16(<vscale x 1 x half> %va, <vsc
define <vscale x 1 x half> @vp_roundtozero_nxv1f16_unmasked(<vscale x 1 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv1f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI13_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI13_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -493,10 +495,11 @@ declare <vscale x 2 x half> @llvm.vp.roundtozero.nxv2f16(<vscale x 2 x half>, <v
define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI14_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI14_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -539,10 +542,11 @@ define <vscale x 2 x half> @vp_roundtozero_nxv2f16(<vscale x 2 x half> %va, <vsc
define <vscale x 2 x half> @vp_roundtozero_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv2f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI15_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI15_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -579,10 +583,11 @@ declare <vscale x 4 x half> @llvm.vp.roundtozero.nxv4f16(<vscale x 4 x half>, <v
define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI16_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI16_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -625,10 +630,11 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
define <vscale x 4 x half> @vp_roundtozero_nxv4f16_unmasked(<vscale x 4 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv4f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI17_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI17_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
@@ -667,9 +673,10 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v10, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI18_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI18_0)(a0)
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -713,10 +720,11 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
define <vscale x 8 x half> @vp_roundtozero_nxv8f16_unmasked(<vscale x 8 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv8f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI19_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI19_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v10, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v10, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
@@ -755,9 +763,10 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v12, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI20_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI20_0)(a0)
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -801,10 +810,11 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
define <vscale x 16 x half> @vp_roundtozero_nxv16f16_unmasked(<vscale x 16 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv16f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI21_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI21_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v12, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
@@ -843,9 +853,10 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v16, v0
-; ZVFH-NEXT: lui a0, %hi(.LCPI22_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI22_0)(a0)
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
@@ -922,10 +933,11 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x half> %va, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv32f16_unmasked:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: lui a1, %hi(.LCPI23_0)
-; ZVFH-NEXT: flh fa5, %lo(.LCPI23_0)(a1)
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8
+; ZVFH-NEXT: li a0, 25
+; ZVFH-NEXT: slli a0, a0, 10
+; ZVFH-NEXT: fmv.h.x fa5, a0
; ZVFH-NEXT: vmflt.vf v0, v16, fa5
; ZVFH-NEXT: fsrmi a0, 1
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
@@ -1210,41 +1222,141 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32_unmasked(<vscale x 16 x fl
declare <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @vp_roundtozero_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv1f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI34_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vf v0, v9, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv1f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv1f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI34_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI34_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv1f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
}
define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv1f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI35_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfabs.v v9, v8
-; CHECK-NEXT: vmflt.vf v0, v9, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v9, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v9, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv1f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v9, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv1f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v9, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv1f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI35_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI35_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv1f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v9, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v9, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v9, v9, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m1, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v9, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.roundtozero.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x double> %v
}
@@ -1252,43 +1364,149 @@ define <vscale x 1 x double> @vp_roundtozero_nxv1f64_unmasked(<vscale x 1 x doub
declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI36_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
-; CHECK-NEXT: vfabs.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v10, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v10
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv2f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v10, v0
+; RV64ZVFH-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v10
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv2f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI36_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI36_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv2f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v10, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v10, v12, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v10
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %v
}
define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv2f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI37_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vfabs.v v10, v8
-; CHECK-NEXT: vmflt.vf v0, v10, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v10, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v10, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v10, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv2f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v10, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv2f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v10, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv2f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI37_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI37_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv2f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v10, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v10, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v10, v10, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v10, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x double> %v
}
@@ -1296,43 +1514,149 @@ define <vscale x 2 x double> @vp_roundtozero_nxv2f64_unmasked(<vscale x 2 x doub
declare <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v12, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI38_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v12
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v12, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v12
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv4f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v12, v0
+; RV64ZVFH-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v12
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv4f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI38_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI38_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv4f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v12, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v12, v16, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v12
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %v
}
define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv4f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI39_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vfabs.v v12, v8
-; CHECK-NEXT: vmflt.vf v0, v12, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v12, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv4f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v12, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv4f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v12, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv4f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI39_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI39_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv4f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v12, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v12, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v12, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x double> %v
}
@@ -1340,43 +1664,149 @@ define <vscale x 4 x double> @vp_roundtozero_nxv4f64_unmasked(<vscale x 4 x doub
declare <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double>, <vscale x 7 x i1>, i32)
define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv7f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI40_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv7f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv7f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI40_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI40_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv7f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 %evl)
ret <vscale x 7 x double> %v
}
define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv7f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI41_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv7f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv7f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv7f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI41_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI41_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv7f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl)
ret <vscale x 7 x double> %v
}
@@ -1384,43 +1814,149 @@ define <vscale x 7 x double> @vp_roundtozero_nxv7f64_unmasked(<vscale x 7 x doub
declare <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v16, v0
-; CHECK-NEXT: lui a0, %hi(.LCPI42_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v16
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v16, v0
+; RV32ZVFH-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v16
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv8f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v16, v0
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v16
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv8f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV32ZVFHMIN-NEXT: lui a0, %hi(.LCPI42_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI42_0)(a0)
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv8f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v16, v0
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v16, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v16
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %v
}
define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv8f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI43_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v16, v8
-; CHECK-NEXT: vmflt.vf v0, v16, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv8f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v16, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv8f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v16, v8
+; RV64ZVFH-NEXT: li a0, 1075
+; RV64ZVFH-NEXT: slli a0, a0, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a0
+; RV64ZVFH-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv8f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: lui a1, %hi(.LCPI43_0)
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI43_0)(a1)
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv8f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v16, v8
+; RV64ZVFHMIN-NEXT: li a0, 1075
+; RV64ZVFHMIN-NEXT: slli a0, a0, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a0
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v16, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x double> %v
}
@@ -1429,87 +1965,325 @@ define <vscale x 8 x double> @vp_roundtozero_nxv8f64_unmasked(<vscale x 8 x doub
declare <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double>, <vscale x 16 x i1>, i32)
define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI44_0)
-; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
-; CHECK-NEXT: sub a2, a0, a1
-; CHECK-NEXT: vslidedown.vx v6, v0, a3
-; CHECK-NEXT: sltu a3, a0, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a2, 1
-; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB44_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB44_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFH-NEXT: vmv1r.v v7, v0
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFH-NEXT: srli a3, a1, 3
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFH-NEXT: sub a2, a0, a1
+; RV32ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFH-NEXT: sltu a3, a0, a2
+; RV32ZVFH-NEXT: addi a3, a3, -1
+; RV32ZVFH-NEXT: and a2, a3, a2
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a2, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v6
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB44_2:
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vmv1r.v v0, v7
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv16f64:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFH-NEXT: vmv1r.v v7, v0
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: srli a3, a1, 3
+; RV64ZVFH-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a2, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v6
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB44_2:
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vmv1r.v v0, v7
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv16f64:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV32ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI44_0)
+; RV32ZVFHMIN-NEXT: srli a3, a1, 3
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI44_0)(a2)
+; RV32ZVFHMIN-NEXT: sub a2, a0, a1
+; RV32ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV32ZVFHMIN-NEXT: sltu a3, a0, a2
+; RV32ZVFHMIN-NEXT: addi a3, a3, -1
+; RV32ZVFHMIN-NEXT: and a2, a3, a2
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a2, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB44_2:
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv16f64:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; RV64ZVFHMIN-NEXT: vmv1r.v v7, v0
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: srli a3, a1, 3
+; RV64ZVFHMIN-NEXT: vslidedown.vx v6, v0, a3
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a2, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v6
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB44_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB44_2:
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vmv1r.v v0, v7
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
}
define <vscale x 16 x double> @vp_roundtozero_nxv16f64_unmasked(<vscale x 16 x double> %va, i32 zeroext %evl) {
-; CHECK-LABEL: vp_roundtozero_nxv16f64_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: lui a2, %hi(.LCPI45_0)
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a2, 1
-; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
-; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB45_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB45_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v8
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
-; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
-; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: ret
+; RV32ZVFH-LABEL: vp_roundtozero_nxv16f64_unmasked:
+; RV32ZVFH: # %bb.0:
+; RV32ZVFH-NEXT: csrr a1, vlenb
+; RV32ZVFH-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFH-NEXT: sub a3, a0, a1
+; RV32ZVFH-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFH-NEXT: sltu a2, a0, a3
+; RV32ZVFH-NEXT: addi a2, a2, -1
+; RV32ZVFH-NEXT: and a2, a2, a3
+; RV32ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v16
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a2, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFH-NEXT: fsrm a2
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFH-NEXT: # %bb.1:
+; RV32ZVFH-NEXT: mv a0, a1
+; RV32ZVFH-NEXT: .LBB45_2:
+; RV32ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFH-NEXT: vfabs.v v24, v8
+; RV32ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFH-NEXT: fsrmi a0, 1
+; RV32ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFH-NEXT: fsrm a0
+; RV32ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFH-NEXT: ret
+;
+; RV64ZVFH-LABEL: vp_roundtozero_nxv16f64_unmasked:
+; RV64ZVFH: # %bb.0:
+; RV64ZVFH-NEXT: csrr a1, vlenb
+; RV64ZVFH-NEXT: li a2, 1075
+; RV64ZVFH-NEXT: sub a3, a0, a1
+; RV64ZVFH-NEXT: slli a2, a2, 52
+; RV64ZVFH-NEXT: fmv.d.x fa5, a2
+; RV64ZVFH-NEXT: sltu a2, a0, a3
+; RV64ZVFH-NEXT: addi a2, a2, -1
+; RV64ZVFH-NEXT: and a2, a2, a3
+; RV64ZVFH-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v16
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a2, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFH-NEXT: fsrm a2
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFH-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFH-NEXT: # %bb.1:
+; RV64ZVFH-NEXT: mv a0, a1
+; RV64ZVFH-NEXT: .LBB45_2:
+; RV64ZVFH-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFH-NEXT: vfabs.v v24, v8
+; RV64ZVFH-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFH-NEXT: fsrmi a0, 1
+; RV64ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFH-NEXT: fsrm a0
+; RV64ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFH-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFH-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFH-NEXT: ret
+;
+; RV32ZVFHMIN-LABEL: vp_roundtozero_nxv16f64_unmasked:
+; RV32ZVFHMIN: # %bb.0:
+; RV32ZVFHMIN-NEXT: csrr a1, vlenb
+; RV32ZVFHMIN-NEXT: lui a2, %hi(.LCPI45_0)
+; RV32ZVFHMIN-NEXT: sub a3, a0, a1
+; RV32ZVFHMIN-NEXT: fld fa5, %lo(.LCPI45_0)(a2)
+; RV32ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV32ZVFHMIN-NEXT: addi a2, a2, -1
+; RV32ZVFHMIN-NEXT: and a2, a2, a3
+; RV32ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a2, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a2
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV32ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV32ZVFHMIN-NEXT: # %bb.1:
+; RV32ZVFHMIN-NEXT: mv a0, a1
+; RV32ZVFHMIN-NEXT: .LBB45_2:
+; RV32ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV32ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV32ZVFHMIN-NEXT: fsrmi a0, 1
+; RV32ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: fsrm a0
+; RV32ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV32ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV32ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV32ZVFHMIN-NEXT: ret
+;
+; RV64ZVFHMIN-LABEL: vp_roundtozero_nxv16f64_unmasked:
+; RV64ZVFHMIN: # %bb.0:
+; RV64ZVFHMIN-NEXT: csrr a1, vlenb
+; RV64ZVFHMIN-NEXT: li a2, 1075
+; RV64ZVFHMIN-NEXT: sub a3, a0, a1
+; RV64ZVFHMIN-NEXT: slli a2, a2, 52
+; RV64ZVFHMIN-NEXT: fmv.d.x fa5, a2
+; RV64ZVFHMIN-NEXT: sltu a2, a0, a3
+; RV64ZVFHMIN-NEXT: addi a2, a2, -1
+; RV64ZVFHMIN-NEXT: and a2, a2, a3
+; RV64ZVFHMIN-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v16
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a2, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a2
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
+; RV64ZVFHMIN-NEXT: bltu a0, a1, .LBB45_2
+; RV64ZVFHMIN-NEXT: # %bb.1:
+; RV64ZVFHMIN-NEXT: mv a0, a1
+; RV64ZVFHMIN-NEXT: .LBB45_2:
+; RV64ZVFHMIN-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64ZVFHMIN-NEXT: vfabs.v v24, v8
+; RV64ZVFHMIN-NEXT: vmflt.vf v0, v24, fa5
+; RV64ZVFHMIN-NEXT: fsrmi a0, 1
+; RV64ZVFHMIN-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: fsrm a0
+; RV64ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; RV64ZVFHMIN-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64ZVFHMIN-NEXT: vfsgnj.vv v8, v24, v8, v0.t
+; RV64ZVFHMIN-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.roundtozero.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x double> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll
index 03e6e6b7a624d..7e580d1057525 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp-combine.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zvfh,+v,+m -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zvfh,+v,+m -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck --check-prefixes=CHECK,RV64 %s
declare <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
@@ -24,17 +24,30 @@ define <vscale x 1 x double> @test1(<vscale x 1 x double> %a, <vscale x 1 x doub
; (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2)
define <vscale x 1 x double> @test2(<vscale x 1 x double> %a, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: test2:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI1_0)(a1)
-; CHECK-NEXT: lui a1, %hi(.LCPI1_1)
-; CHECK-NEXT: fld fa4, %lo(.LCPI1_1)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.v.f v9, fa5
-; CHECK-NEXT: vfadd.vf v9, v9, fa4, v0.t
-; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
-; CHECK-NEXT: ret
+; RV32-LABEL: test2:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI1_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI1_0)(a1)
+; RV32-NEXT: lui a1, %hi(.LCPI1_1)
+; RV32-NEXT: fld fa4, %lo(.LCPI1_1)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vfmv.v.f v9, fa5
+; RV32-NEXT: vfadd.vf v9, v9, fa4, v0.t
+; RV32-NEXT: vfmul.vv v8, v8, v9, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test2:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 1025
+; RV64-NEXT: slli a1, a1, 52
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vmv.v.x v9, a1
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: slli a0, a0, 62
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vfadd.vf v9, v9, fa5, v0.t
+; RV64-NEXT: vfmul.vv v8, v8, v9, v0.t
+; RV64-NEXT: ret
%t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 2.0), <vscale x 1 x i1> %m, i32 %evl)
%v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 4.0), <vscale x 1 x double> %t, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
@@ -42,18 +55,32 @@ define <vscale x 1 x double> @test2(<vscale x 1 x double> %a, <vscale x 1 x i1>
; (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
define <vscale x 1 x double> @test3(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: test3:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI2_0)(a1)
-; CHECK-NEXT: lui a1, %hi(.LCPI2_1)
-; CHECK-NEXT: fld fa4, %lo(.LCPI2_1)(a1)
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.v.f v10, fa5
-; CHECK-NEXT: vfmul.vf v10, v10, fa4, v0.t
-; CHECK-NEXT: vfmadd.vv v10, v8, v9, v0.t
-; CHECK-NEXT: vmv.v.v v8, v10
-; CHECK-NEXT: ret
+; RV32-LABEL: test3:
+; RV32: # %bb.0:
+; RV32-NEXT: lui a1, %hi(.LCPI2_0)
+; RV32-NEXT: fld fa5, %lo(.LCPI2_0)(a1)
+; RV32-NEXT: lui a1, %hi(.LCPI2_1)
+; RV32-NEXT: fld fa4, %lo(.LCPI2_1)(a1)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vfmv.v.f v10, fa5
+; RV32-NEXT: vfmul.vf v10, v10, fa4, v0.t
+; RV32-NEXT: vfmadd.vv v10, v8, v9, v0.t
+; RV32-NEXT: vmv.v.v v8, v10
+; RV32-NEXT: ret
+;
+; RV64-LABEL: test3:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 1025
+; RV64-NEXT: slli a1, a1, 52
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: li a0, 1
+; RV64-NEXT: slli a0, a0, 62
+; RV64-NEXT: fmv.d.x fa5, a0
+; RV64-NEXT: vfmul.vf v10, v10, fa5, v0.t
+; RV64-NEXT: vfmadd.vv v10, v8, v9, v0.t
+; RV64-NEXT: vmv.v.v v8, v10
+; RV64-NEXT: ret
%t = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> splat (double 2.0), <vscale x 1 x i1> %m, i32 %evl)
%v = call fast <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %t, <vscale x 1 x double> splat (double 4.0), <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll
index e269b13137d44..93b12ad14d7e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode-f16.ll
@@ -100,8 +100,9 @@ define half @vreduce_fminimum_nxv4f16(<vscale x 4 x half> %val) {
; ZVFH-NEXT: vcpop.m a0, v9
; ZVFH-NEXT: beqz a0, .LBB4_2
; ZVFH-NEXT: # %bb.1:
-; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa0, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: addi a0, a0, -512
+; ZVFH-NEXT: fmv.h.x fa0, a0
; ZVFH-NEXT: ret
; ZVFH-NEXT: .LBB4_2:
; ZVFH-NEXT: vfredmin.vs v8, v8, v8
@@ -138,8 +139,9 @@ define half @vreduce_fmaximum_nxv4f16(<vscale x 4 x half> %val) {
; ZVFH-NEXT: vcpop.m a0, v9
; ZVFH-NEXT: beqz a0, .LBB5_2
; ZVFH-NEXT: # %bb.1:
-; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa0, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: addi a0, a0, -512
+; ZVFH-NEXT: fmv.h.x fa0, a0
; ZVFH-NEXT: ret
; ZVFH-NEXT: .LBB5_2:
; ZVFH-NEXT: vfredmax.vs v8, v8, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 78aae96242fd3..861998a2ba51a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -998,13 +998,13 @@ declare half @llvm.vector.reduce.fmin.nxv10f16(<vscale x 10 x half>)
define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
; CHECK-LABEL: vreduce_fmin_nxv10f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a0, %hi(.LCPI73_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI73_0)
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
-; CHECK-NEXT: vle16.v v12, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a1, a0, 2
; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addi a1, a1, -512
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v12, a1
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfredmin.vs v12, v8, v12
; CHECK-NEXT: vfmv.f.s fa0, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll
index 8993bf8a767d8..7fb26fb6f6258 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp-f16.ll
@@ -124,8 +124,9 @@ define half @vpreduce_fminimum_nxv4f16(half %start, <vscale x 4 x half> %val, <v
; ZVFH-NEXT: or a1, a1, a2
; ZVFH-NEXT: beqz a1, .LBB4_2
; ZVFH-NEXT: # %bb.1:
-; ZVFH-NEXT: lui a0, %hi(.LCPI4_0)
-; ZVFH-NEXT: flh fa0, %lo(.LCPI4_0)(a0)
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: addi a0, a0, -512
+; ZVFH-NEXT: fmv.h.x fa0, a0
; ZVFH-NEXT: ret
; ZVFH-NEXT: .LBB4_2:
; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
@@ -176,8 +177,9 @@ define half @vpreduce_fmaximum_nxv4f16(half %start, <vscale x 4 x half> %val, <v
; ZVFH-NEXT: or a1, a1, a2
; ZVFH-NEXT: beqz a1, .LBB5_2
; ZVFH-NEXT: # %bb.1:
-; ZVFH-NEXT: lui a0, %hi(.LCPI5_0)
-; ZVFH-NEXT: flh fa0, %lo(.LCPI5_0)(a0)
+; ZVFH-NEXT: lui a0, 8
+; ZVFH-NEXT: addi a0, a0, -512
+; ZVFH-NEXT: fmv.h.x fa0, a0
; ZVFH-NEXT: ret
; ZVFH-NEXT: .LBB5_2:
; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 7d82787786ebf..05b76ec7733bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -123,13 +123,13 @@ define <vscale x 1 x double> @test4(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: beqz a1, .LBB3_2
; CHECK-NEXT: # %bb.1: # %if.then
-; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI3_0)(a1)
-; CHECK-NEXT: lui a1, %hi(.LCPI3_1)
-; CHECK-NEXT: fld fa4, %lo(.LCPI3_1)(a1)
+; CHECK-NEXT: li a1, 1023
+; CHECK-NEXT: slli a1, a1, 52
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.v.f v10, fa5
-; CHECK-NEXT: vfmv.v.f v11, fa4
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: slli a1, a1, 62
+; CHECK-NEXT: vmv.v.x v11, a1
; CHECK-NEXT: vfadd.vv v10, v10, v11
; CHECK-NEXT: lui a1, %hi(scratch)
; CHECK-NEXT: addi a1, a1, %lo(scratch)
@@ -242,12 +242,12 @@ define <vscale x 1 x double> @test6(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK-NEXT: andi a1, a1, 2
; CHECK-NEXT: beqz a1, .LBB5_4
; CHECK-NEXT: .LBB5_2: # %if.then4
-; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI5_0)(a1)
-; CHECK-NEXT: lui a1, %hi(.LCPI5_1)
-; CHECK-NEXT: fld fa4, %lo(.LCPI5_1)(a1)
-; CHECK-NEXT: vfmv.v.f v9, fa5
-; CHECK-NEXT: vfmv.v.f v10, fa4
+; CHECK-NEXT: li a1, 1023
+; CHECK-NEXT: slli a1, a1, 52
+; CHECK-NEXT: vmv.v.x v9, a1
+; CHECK-NEXT: li a1, 1
+; CHECK-NEXT: slli a1, a1, 62
+; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vfadd.vv v9, v9, v10
; CHECK-NEXT: lui a1, %hi(scratch)
; CHECK-NEXT: addi a1, a1, %lo(scratch)
diff --git a/llvm/test/CodeGen/RISCV/srodata.ll b/llvm/test/CodeGen/RISCV/srodata.ll
index 1d5bd904f233f..71ced1743efcd 100644
--- a/llvm/test/CodeGen/RISCV/srodata.ll
+++ b/llvm/test/CodeGen/RISCV/srodata.ll
@@ -4,12 +4,6 @@
; RUN: sed 's/SMALL_DATA_LIMIT/0/g' %s | \
; RUN: llc -mtriple=riscv64 -mattr=+d | \
; RUN: FileCheck -check-prefix=CHECK-SDL-0 %s
-; RUN: sed 's/SMALL_DATA_LIMIT/4/g' %s | \
-; RUN: llc -mtriple=riscv32 -mattr=+d | \
-; RUN: FileCheck -check-prefix=CHECK-SDL-4 %s
-; RUN: sed 's/SMALL_DATA_LIMIT/4/g' %s | \
-; RUN: llc -mtriple=riscv64 -mattr=+d | \
-; RUN: FileCheck -check-prefix=CHECK-SDL-4 %s
; RUN: sed 's/SMALL_DATA_LIMIT/8/g' %s | \
; RUN: llc -mtriple=riscv32 -mattr=+d | \
; RUN: FileCheck -check-prefix=CHECK-SDL-8 %s
@@ -23,11 +17,6 @@
; RUN: llc -mtriple=riscv64 -mattr=+d | \
; RUN: FileCheck -check-prefix=CHECK-SDL-16 %s
-define dso_local float @foof() {
-entry:
- ret float 0x400A08ACA0000000
-}
-
define dso_local double @foo() {
entry:
ret double 0x400A08AC91C3E242
@@ -39,9 +28,5 @@ entry:
; CHECK-SDL-0-NOT: .section .srodata.cst4
; CHECK-SDL-0-NOT: .section .srodata.cst8
-; CHECK-SDL-4: .section .srodata.cst4
-; CHECK-SDL-4-NOT: .section .srodata.cst8
-; CHECK-SDL-8: .section .srodata.cst4
; CHECK-SDL-8: .section .srodata.cst8
-; CHECK-SDL-16: .section .srodata.cst4
; CHECK-SDL-16: .section .srodata.cst8
>From ca290b70918b0216e602436751d640a3f239c629 Mon Sep 17 00:00:00 2001
From: Ross Brunton <bruntonross at protonmail.com>
Date: Wed, 24 Sep 2025 10:57:55 +0100
Subject: [PATCH 16/35] [Offload] Print Image location rather than casting it
(#160309)
This squishes a warning where the runtime tries to bind a StringRef to
a `%p`.
---
offload/plugins-nextgen/common/src/PluginInterface.cpp | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/offload/plugins-nextgen/common/src/PluginInterface.cpp b/offload/plugins-nextgen/common/src/PluginInterface.cpp
index 30b5db782370d..7d05dd25dbf75 100644
--- a/offload/plugins-nextgen/common/src/PluginInterface.cpp
+++ b/offload/plugins-nextgen/common/src/PluginInterface.cpp
@@ -1714,7 +1714,8 @@ int32_t GenericPluginTy::is_initialized() const { return Initialized; }
int32_t GenericPluginTy::isPluginCompatible(StringRef Image) {
auto HandleError = [&](Error Err) -> bool {
[[maybe_unused]] std::string ErrStr = toString(std::move(Err));
- DP("Failure to check validity of image %p: %s", Image, ErrStr.c_str());
+ DP("Failure to check validity of image %p: %s", Image.data(),
+ ErrStr.c_str());
return false;
};
switch (identify_magic(Image)) {
@@ -1742,7 +1743,8 @@ int32_t GenericPluginTy::isPluginCompatible(StringRef Image) {
int32_t GenericPluginTy::isDeviceCompatible(int32_t DeviceId, StringRef Image) {
auto HandleError = [&](Error Err) -> bool {
[[maybe_unused]] std::string ErrStr = toString(std::move(Err));
- DP("Failure to check validity of image %p: %s", Image, ErrStr.c_str());
+ DP("Failure to check validity of image %p: %s", Image.data(),
+ ErrStr.c_str());
return false;
};
switch (identify_magic(Image)) {
>From 3dfc10222625bfbe62d852fdc42bad0af9ab481f Mon Sep 17 00:00:00 2001
From: Abhilash Majumder <abmajumder at nvidia.com>
Date: Wed, 24 Sep 2025 15:29:41 +0530
Subject: [PATCH 17/35] [NVPTX] prefetch.tensormap pattern rewriter fix
(#159253)
Context: Highlighted from #156830, this is an ISel lowering issue in the
NVPTX backend for the prefetch.tensormap intrinsic. It is caused by an
unchecked pattern rewrite during the infer-address-space pass.
The intrinsic is valid only for the const, param and generic address
spaces; any other address space is invalid. Currently, the intrinsic is
incorrectly rewritten to target AS(1) when its pointer argument comes in
as an argument of a kernel function.
This patch therefore checks for the supported address spaces before
rewriting the call.
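For illustration, a minimal IR sketch of the problematic case, mirroring
the test_param_to_generic_cast_kernel test added below (the function name
here is illustrative):
  define ptx_kernel void @example(ptr addrspace(101) %param_ptr) {
    %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr
    ; Previously infer-address-space could rewrite this call to use AS(1),
    ; which the intrinsic does not support; with this patch the rewrite
    ; only fires for const/param pointers and the call is otherwise left
    ; unchanged.
    call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
    ret void
  }
  declare void @llvm.nvvm.prefetch.tensormap.p0(ptr)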
cc @durga4github
FYI: @Wolfram70 @rupprecht @castigli
---
.../Target/NVPTX/NVPTXTargetTransformInfo.cpp | 8 +++-
.../CodeGen/NVPTX/prefetch-inferas-test.ll | 35 +++++++++++++++--
llvm/test/CodeGen/NVPTX/prefetch.ll | 38 ++++++++++++++++++-
3 files changed, 74 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
index f4f89613b358d..b5bf72e45038a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXTargetTransformInfo.cpp
@@ -590,8 +590,12 @@ Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
}
case Intrinsic::nvvm_prefetch_tensormap: {
IRBuilder<> Builder(II);
- return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_prefetch_tensormap,
- NewV);
+ const unsigned NewAS = NewV->getType()->getPointerAddressSpace();
+ if (NewAS == NVPTXAS::ADDRESS_SPACE_CONST ||
+ NewAS == NVPTXAS::ADDRESS_SPACE_PARAM)
+ return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_prefetch_tensormap,
+ NewV);
+ return nullptr;
}
}
return nullptr;
diff --git a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
index bc67471209bf8..32b55a38e55ef 100644
--- a/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
+++ b/llvm/test/CodeGen/NVPTX/prefetch-inferas-test.ll
@@ -11,7 +11,6 @@ target triple = "nvptx64-unknown-unknown"
define void @test_infer_const_from_cast() {
; INFER-LABEL: @test_infer_const_from_cast
; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
-; BOTH: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
; PTX-LABEL: .visible .func test_infer_const_from_cast(
; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
@@ -69,12 +68,40 @@ entry:
%cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
%cast2 = addrspacecast ptr %cast1 to ptr addrspace(4)
%cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr
- call void @llvm.nvvm.prefetch.tensormap(ptr %cast3)
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3)
+ ret void
+}
+
+; Kernel Function Test
+; Cast from Param space to Generic
+define ptx_kernel void @test_param_to_generic_cast_kernel(ptr addrspace(101) %param_ptr) {
+; INFER-LABEL: @test_param_to_generic_cast_kernel
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+; PTX-LABEL: .visible .entry test_param_to_generic_cast_kernel(
+; PTX: prefetch.param.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast = addrspacecast ptr addrspace(101) %param_ptr to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast)
+ ret void
+}
+
+; Kernel Function Test
+; Multiple casts in sequence
+define ptx_kernel void @test_infer_through_multiple_casts_kernel() {
+; INFER-LABEL: @test_infer_through_multiple_casts_kernel
+; INFER: call void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4) @constant_tensormap)
+; PTX-LABEL: .visible .entry test_infer_through_multiple_casts_kernel(
+; PTX: mov.b64 %rd{{[0-9]+}}, constant_tensormap;
+; PTX: cvta.const.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}};
+; PTX: prefetch.tensormap [%rd{{[0-9]+}}];
+entry:
+ %cast1 = addrspacecast ptr addrspace(4) @constant_tensormap to ptr
+ %cast2 = addrspacecast ptr %cast1 to ptr addrspace(4)
+ %cast3 = addrspacecast ptr addrspace(4) %cast2 to ptr
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr %cast3)
ret void
}
declare void @llvm.nvvm.prefetch.tensormap.p0(ptr)
declare void @llvm.nvvm.prefetch.tensormap.p4(ptr addrspace(4))
declare void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101))
-
-
diff --git a/llvm/test/CodeGen/NVPTX/prefetch.ll b/llvm/test/CodeGen/NVPTX/prefetch.ll
index a1c5ec8f50a6b..c0489cc6fd73a 100644
--- a/llvm/test/CodeGen/NVPTX/prefetch.ll
+++ b/llvm/test/CodeGen/NVPTX/prefetch.ll
@@ -121,4 +121,40 @@ define void @prefetch_param_tensormap(ptr addrspace(101) %param_ptr) {
; CHECK-PTX64-NEXT: ret;
tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
ret void
-}
\ No newline at end of file
+}
+
+define ptx_kernel void @prefetch_generic_tensormap_kernel(ptr %ptr) {
+; CHECK-PTX64-LABEL: prefetch_generic_tensormap_kernel(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_generic_tensormap_kernel_param_0];
+; CHECK-PTX64-NEXT: prefetch.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p0(ptr %ptr)
+ ret void
+}
+
+define ptx_kernel void @prefetch_param_tensormap_kernel(ptr addrspace(101) %param_ptr) {
+; CHECK-PTX64-LABEL: prefetch_param_tensormap_kernel(
+; CHECK-PTX64: {
+; CHECK-PTX64-NEXT: .reg .b64 %rd<2>;
+; CHECK-PTX64-EMPTY:
+; CHECK-PTX64-NEXT: // %bb.0:
+; CHECK-PTX64-NEXT: ld.param.b64 %rd1, [prefetch_param_tensormap_kernel_param_0];
+; CHECK-PTX64-NEXT: prefetch.param.tensormap [%rd1];
+; CHECK-PTX64-NEXT: ret;
+ tail call void @llvm.nvvm.prefetch.tensormap.p101(ptr addrspace(101) %param_ptr)
+ ret void
+}
+
+define ptx_kernel void @prefetch_grid_const_tensormap(ptr byval([64 x i8]) align 64 "nvvm.grid_constant" %ptr) {
+; CHECK-PTX64-LABEL: .visible .entry prefetch_grid_const_tensormap(
+; CHECK-PTX64: prefetch.tensormap [%{{(SP|rd[0-9]+).*}}];
+; CHECK-PTX64: ret;
+
+entry:
+ call void @llvm.nvvm.prefetch.tensormap.p0(ptr addrspace(0) %ptr)
+ ret void
+}
>From 3e992ba9e87ea53cb6b7b205c86f88ca1903d1ab Mon Sep 17 00:00:00 2001
From: Maksim Levental <maksim.levental at gmail.com>
Date: Wed, 24 Sep 2025 06:00:34 -0400
Subject: [PATCH 18/35] Revert "[MLIR][Python] add Python wheel build
demo/test" (#160481)
Reverts llvm/llvm-project#160388 because it broke
[mlir-nvidia](https://lab.llvm.org/buildbot/#/builders/138) builder.
---
mlir/examples/standalone/CMakeLists.txt | 8 +--
mlir/examples/standalone/pyproject.toml | 65 --------------------
mlir/test/Examples/standalone/lit.local.cfg | 5 --
mlir/test/Examples/standalone/test.wheel.toy | 31 ----------
mlir/test/lit.site.cfg.py.in | 2 -
5 files changed, 2 insertions(+), 109 deletions(-)
delete mode 100644 mlir/examples/standalone/pyproject.toml
delete mode 100644 mlir/test/Examples/standalone/test.wheel.toy
diff --git a/mlir/examples/standalone/CMakeLists.txt b/mlir/examples/standalone/CMakeLists.txt
index c6c49fde12d2e..e2bcda7fa6f0b 100644
--- a/mlir/examples/standalone/CMakeLists.txt
+++ b/mlir/examples/standalone/CMakeLists.txt
@@ -63,12 +63,8 @@ if(MLIR_ENABLE_BINDINGS_PYTHON)
include(MLIRDetectPythonEnv)
mlir_configure_python_dev_packages()
# Note: for EXTERNAL_PROJECT_BUILD this must be set from the command line.
- if(NOT MLIR_PYTHON_PACKAGE_PREFIX)
- set(MLIR_PYTHON_PACKAGE_PREFIX "mlir_standalone" CACHE STRING "" FORCE)
- endif()
- if(NOT MLIR_BINDINGS_PYTHON_INSTALL_PREFIX)
- set(MLIR_BINDINGS_PYTHON_INSTALL_PREFIX "python_packages/standalone/${MLIR_PYTHON_PACKAGE_PREFIX}" CACHE STRING "" FORCE)
- endif()
+ set(MLIR_PYTHON_PACKAGE_PREFIX "mlir_standalone" CACHE STRING "" FORCE)
+ set(MLIR_BINDINGS_PYTHON_INSTALL_PREFIX "python_packages/standalone/${MLIR_PYTHON_PACKAGE_PREFIX}" CACHE STRING "" FORCE)
add_subdirectory(python)
endif()
add_subdirectory(test)
diff --git a/mlir/examples/standalone/pyproject.toml b/mlir/examples/standalone/pyproject.toml
deleted file mode 100644
index 5a1e6e86513c3..0000000000000
--- a/mlir/examples/standalone/pyproject.toml
+++ /dev/null
@@ -1,65 +0,0 @@
-# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-# See https://llvm.org/LICENSE.txt for license information.
-# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-# Copyright (c) 2025.
-
-[project]
-name = "standalone-python-bindings"
-dynamic = ["version"]
-requires-python = ">=3.8,<=3.14"
-dependencies = [
- "numpy>=1.19.5, <=2.1.2",
- "PyYAML>=5.4.0, <=6.0.1",
- "ml_dtypes>=0.1.0, <=0.6.0; python_version<'3.13'",
- "ml_dtypes>=0.5.0, <=0.6.0; python_version>='3.13'",
-]
-
-[project.urls]
-Homepage = "https://github.com/llvm/llvm-project"
-Discussions = "https://discourse.llvm.org/"
-"Issue Tracker" = "https://github.com/llvm/llvm-project/issues?q=is%3Aissue%20state%3Aopen%20label%3Amlir%3Apython%20"
-"Source Code" = "https://github.com/llvm/llvm-project/tree/main/mlir/python"
-
-[build-system]
-requires = [
- "scikit-build-core>=0.10.7",
- "typing_extensions>=4.12.2",
- "nanobind>=2.9, <3.0",
- "pybind11>=2.10.0, <=2.13.6",
-]
-build-backend = "scikit_build_core.build"
-
-[tool.scikit-build]
-# This is the minimum version of scikit-build-core.
-minimum-version = "0.10.7"
-# This pyproject.toml must be adjacent to the root CMakeLists.txt (wherever project(...) is specified).
-cmake.source-dir = "."
-# This is for installing/distributing the python bindings target and only the python bindings target.
-build.targets = ["StandalonePythonModules"]
-install.components = ["StandalonePythonModules"]
-
-[tool.scikit-build.cmake.define]
-# Optional
-CMAKE_C_COMPILER = { env = "CMAKE_C_COMPILER", default = "" }
-CMAKE_CXX_COMPILER = { env = "CMAKE_CXX_COMPILER", default = "" }
-CMAKE_C_COMPILER_LAUNCHER = { env = "CMAKE_C_COMPILER_LAUNCHER", default = "" }
-CMAKE_CXX_COMPILER_LAUNCHER = { env = "CMAKE_CXX_COMPILER_LAUNCHER", default = "" }
-CMAKE_GENERATOR = { env = "CMAKE_GENERATOR", default = "Ninja" }
-LLVM_USE_LINKER = { env = "LLVM_USE_LINKER", default = "" }
-# Optional but highly recommended (this makes the bindings compatible with other bindings packages
-# by preventing symbol collisions).
-CMAKE_VISIBILITY_INLINES_HIDDEN = "ON"
-CMAKE_C_VISIBILITY_PRESET = "hidden"
-CMAKE_CXX_VISIBILITY_PRESET = "hidden"
-
-# Non-optional (alternatively you could use CMAKE_PREFIX_PATH here).
-MLIR_DIR = { env = "MLIR_DIR", default = "" }
-# Non-optional
-CMAKE_BUILD_TYPE = { env = "CMAKE_BUILD_TYPE", default = "Release" }
-MLIR_ENABLE_BINDINGS_PYTHON = "ON"
-# Effectively non-optional (any downstream project should specify this).
-MLIR_PYTHON_PACKAGE_PREFIX = "mlir_standalone"
-# This specifies the directory in the install directory (i.e., /tmp/pip-wheel/platlib) where _mlir_libs, dialects, etc.
-# are installed. Thus, this will be the package location (and the name of the package) that pip assumes is
-# the root package.
-MLIR_BINDINGS_PYTHON_INSTALL_PREFIX = "mlir_standalone"
diff --git a/mlir/test/Examples/standalone/lit.local.cfg b/mlir/test/Examples/standalone/lit.local.cfg
index a566208b47bc1..3b12dcbd99e83 100644
--- a/mlir/test/Examples/standalone/lit.local.cfg
+++ b/mlir/test/Examples/standalone/lit.local.cfg
@@ -1,5 +1,3 @@
-import os
-
# Disable with sanitizers for now, this require some more setup apparently.
for san in ["asan", "msan", "ubsan"]:
if san in config.available_features:
@@ -9,10 +7,7 @@ config.substitutions.append(("%cmake_exe", config.host_cmake))
config.substitutions.append(("%cmake_generator", config.host_cmake_generator))
config.substitutions.append(("%host_cxx", config.host_cxx))
config.substitutions.append(("%host_cc", config.host_cc))
-config.substitutions.append(("%hostc_compiler_launcher", config.host_c_compiler_launcher))
-config.substitutions.append(("%hostcxx_compiler_launcher", config.host_cxx_compiler_launcher))
config.substitutions.append(("%enable_libcxx", config.enable_libcxx))
config.substitutions.append(("%mlir_cmake_dir", config.mlir_cmake_dir))
-config.substitutions.append(("%mlir_obj_root", config.mlir_obj_root))
config.substitutions.append(("%llvm_use_linker", config.llvm_use_linker))
config.substitutions.append(("%cmake_build_type", config.cmake_build_type))
diff --git a/mlir/test/Examples/standalone/test.wheel.toy b/mlir/test/Examples/standalone/test.wheel.toy
deleted file mode 100644
index 1a439d5689939..0000000000000
--- a/mlir/test/Examples/standalone/test.wheel.toy
+++ /dev/null
@@ -1,31 +0,0 @@
-# There's no real issue with windows here, it's just that some CMake generated paths for targets end up being longer
-# than 255 chars when combined with the fact that pip wants to install into a tmp directory buried under
-# C/Users/ContainerAdministrator/AppData/Local/Temp.
-# UNSUPPORTED: target={{.*(windows).*}}
-
-# RUN: export CMAKE_BUILD_TYPE=%cmake_build_type
-# RUN: export CMAKE_CXX_COMPILER=%host_cxx
-# RUN: export CMAKE_CXX_COMPILER_LAUNCHER=%hostcxx_compiler_launcher
-# RUN: export CMAKE_C_COMPILER=%host_cc
-# RUN: export CMAKE_C_COMPILER_LAUNCHER=%hostc_compiler_launcher
-# RUN: export CMAKE_GENERATOR=%cmake_generator
-# RUN: export LLVM_USE_LINKER=%llvm_use_linker
-# RUN: export MLIR_DIR="%mlir_cmake_dir"
-
-# RUN: %python -m pip wheel "%mlir_src_root/examples/standalone" -w "%mlir_obj_root/wheelhouse" -v | tee %t
-
-# RUN: rm -rf "%mlir_obj_root/standalone-python-bindings-install"
-# RUN: %python -m pip install standalone_python_bindings -f "%mlir_obj_root/wheelhouse" --target "%mlir_obj_root/standalone-python-bindings-install" -v | tee -a %t
-
-# RUN: export PYTHONPATH="%mlir_obj_root/standalone-python-bindings-install"
-# RUN: %python "%mlir_src_root/examples/standalone/test/python/smoketest.py" nanobind | tee -a %t
-
-# RUN: FileCheck --input-file=%t %s
-
-# CHECK: Successfully built standalone-python-bindings
-
-# CHECK: module {
-# CHECK: %[[C2:.*]] = arith.constant 2 : i32
-# CHECK: %[[V0:.*]] = standalone.foo %[[C2]] : i32
-# CHECK: }
-
diff --git a/mlir/test/lit.site.cfg.py.in b/mlir/test/lit.site.cfg.py.in
index 940e2ad3c4365..2fc595dfabbf5 100644
--- a/mlir/test/lit.site.cfg.py.in
+++ b/mlir/test/lit.site.cfg.py.in
@@ -15,8 +15,6 @@ config.native_target = "@LLVM_NATIVE_ARCH@"
config.host_os = "@HOST_OS@"
config.host_cc = "@HOST_CC@"
config.host_cxx = "@HOST_CXX@"
-config.host_c_compiler_launcher = "@CMAKE_C_COMPILER_LAUNCHER@"
-config.host_cxx_compiler_launcher = "@CMAKE_CXX_COMPILER_LAUNCHER@"
config.enable_libcxx = "@LLVM_ENABLE_LIBCXX@"
config.host_cmake = "@CMAKE_COMMAND@"
config.host_cmake_generator = "@CMAKE_GENERATOR@"
>From c3518b4a8ba4eba71eb31de8bbddf7b9117e2f74 Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Wed, 24 Sep 2025 11:21:59 +0100
Subject: [PATCH 19/35] [AArch64] Add a test case showing both dup and
scalar_to_reg in the same function. NFC
---
.../AArch64/aarch64-matrix-umull-smull.ll | 67 ++++++++++++++++++-
1 file changed, 66 insertions(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index 8655bb1292ef7..cdde11042462b 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -1365,7 +1365,72 @@ for.end12: ; preds = %vector.body
ret void
}
-declare i16 @llvm.vector.reduce.add.v8i16(<8 x i16>)
+define noundef <8 x i16> @cmplx_mul_combined_re_im(<8 x i16> noundef %a, i64 %scale.coerce) {
+; CHECK-SD-LABEL: cmplx_mul_combined_re_im:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: lsr x9, x0, #16
+; CHECK-SD-NEXT: adrp x8, .LCPI14_0
+; CHECK-SD-NEXT: dup v4.8h, w0
+; CHECK-SD-NEXT: dup v1.8h, w9
+; CHECK-SD-NEXT: fmov s3, w9
+; CHECK-SD-NEXT: sqneg v2.8h, v1.8h
+; CHECK-SD-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-SD-NEXT: tbl v1.16b, { v2.16b, v3.16b }, v1.16b
+; CHECK-SD-NEXT: rev32 v2.8h, v0.8h
+; CHECK-SD-NEXT: sqdmull v3.4s, v0.4h, v4.4h
+; CHECK-SD-NEXT: sqdmull2 v0.4s, v0.8h, v4.8h
+; CHECK-SD-NEXT: sqdmlal v3.4s, v2.4h, v1.4h
+; CHECK-SD-NEXT: sqdmlal2 v0.4s, v2.8h, v1.8h
+; CHECK-SD-NEXT: uzp2 v0.8h, v3.8h, v0.8h
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmplx_mul_combined_re_im:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: lsr w9, w0, #16
+; CHECK-GI-NEXT: adrp x8, .LCPI14_0
+; CHECK-GI-NEXT: rev32 v4.8h, v0.8h
+; CHECK-GI-NEXT: dup v1.8h, w9
+; CHECK-GI-NEXT: fmov s3, w9
+; CHECK-GI-NEXT: sqneg v2.8h, v1.8h
+; CHECK-GI-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
+; CHECK-GI-NEXT: tbl v1.16b, { v2.16b, v3.16b }, v1.16b
+; CHECK-GI-NEXT: mov d2, v0.d[1]
+; CHECK-GI-NEXT: dup v3.8h, w0
+; CHECK-GI-NEXT: sqdmull v2.4s, v2.4h, v3.4h
+; CHECK-GI-NEXT: sqdmull v5.4s, v4.4h, v1.4h
+; CHECK-GI-NEXT: sqdmlal v5.4s, v0.4h, v3.4h
+; CHECK-GI-NEXT: sqdmlal2 v2.4s, v4.8h, v1.8h
+; CHECK-GI-NEXT: uzp2 v0.8h, v5.8h, v2.8h
+; CHECK-GI-NEXT: ret
+entry:
+ %scale.sroa.0.0.extract.trunc = trunc i64 %scale.coerce to i16
+ %scale.sroa.2.0.extract.shift23 = lshr i64 %scale.coerce, 16
+ %scale.sroa.2.0.extract.trunc = trunc i64 %scale.sroa.2.0.extract.shift23 to i16
+ %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
+ %vecinit.i24 = insertelement <8 x i16> poison, i16 %scale.sroa.0.0.extract.trunc, i64 0
+ %vecinit.i = insertelement <8 x i16> poison, i16 %scale.sroa.2.0.extract.trunc, i64 0
+ %vecinit7.i = shufflevector <8 x i16> %vecinit.i, <8 x i16> poison, <8 x i32> zeroinitializer
+ %vqnegq_v1.i = tail call noundef <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %vecinit7.i)
+ %vbsl5.i = shufflevector <8 x i16> %vqnegq_v1.i, <8 x i16> %vecinit.i, <8 x i32> <i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6, i32 8>
+ %shuffle.i40 = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle.i39 = shufflevector <8 x i16> %vecinit.i24, <8 x i16> poison, <4 x i32> zeroinitializer
+ %vqdmull_v2.i36 = tail call noundef <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i40, <4 x i16> %shuffle.i39)
+ %shuffle.i44 = shufflevector <8 x i16> %a, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmull_v2.i = tail call noundef <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i44, <4 x i16> %shuffle.i39)
+ %shuffle.i38 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %shuffle.i37 = shufflevector <8 x i16> %vbsl5.i, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %vqdmlal2.i45 = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i38, <4 x i16> %shuffle.i37)
+ %vqdmlal_v3.i46 = tail call noundef <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %vqdmull_v2.i36, <4 x i32> %vqdmlal2.i45)
+ %shuffle.i42 = shufflevector <8 x i16> %shuffle.i, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %shuffle.i41 = shufflevector <8 x i16> %vbsl5.i, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+ %vqdmlal2.i = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i42, <4 x i16> %shuffle.i41)
+ %vqdmlal_v3.i = tail call noundef <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %vqdmull_v2.i, <4 x i32> %vqdmlal2.i)
+ %0 = bitcast <4 x i32> %vqdmlal_v3.i46 to <8 x i16>
+ %1 = bitcast <4 x i32> %vqdmlal_v3.i to <8 x i16>
+ %shuffle.i35 = shufflevector <8 x i16> %0, <8 x i16> %1, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %shuffle.i35
+}
+
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}
>From b202edd555e309d430d1b6eb46c39770e5f7fa6f Mon Sep 17 00:00:00 2001
From: Jonathan Thackray <jonathan.thackray at arm.com>
Date: Wed, 24 Sep 2025 11:29:12 +0100
Subject: [PATCH 20/35] [AArch64][llvm] (NFC) Rename `[SU]DOT_ZZZI?_{S|D}` and
`BFMMLA_ZZZ` to _BtoS/_HtoD/_HtoS to clarify (#160139)
Rename instruction references for `[SU]DOT_ZZZI?_{S|D}` and `BFMMLA_ZZZ`
to _BtoS/_HtoD/_HtoS to better clarify intent, and update all cascading
references in other places where these names are used.
---
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td | 10 +++++-----
llvm/lib/Target/AArch64/AArch64SchedA320.td | 6 +++---
llvm/lib/Target/AArch64/AArch64SchedA510.td | 6 +++---
.../Target/AArch64/AArch64SchedNeoverseN2.td | 6 +++---
.../Target/AArch64/AArch64SchedNeoverseN3.td | 6 +++---
.../Target/AArch64/AArch64SchedNeoverseV1.td | 6 +++---
.../Target/AArch64/AArch64SchedNeoverseV2.td | 6 +++---
llvm/lib/Target/AArch64/SVEInstrFormats.td | 16 ++++++++--------
.../AArch64/Neoverse/V1-sve-instructions.s | 18 +++++++++---------
9 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 7fe4f7acdbd49..1e30735b7a56a 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -709,13 +709,13 @@ let Predicates = [HasSVE_or_SME] in {
let Predicates = [HasSVE_or_SME] in {
def : Pat<(nxv4i32 (partial_reduce_umla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
- (UDOT_ZZZ_S $Acc, $MulLHS, $MulRHS)>;
+ (UDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>;
def : Pat<(nxv4i32 (partial_reduce_smla nxv4i32:$Acc, nxv16i8:$MulLHS, nxv16i8:$MulRHS)),
- (SDOT_ZZZ_S $Acc, $MulLHS, $MulRHS)>;
+ (SDOT_ZZZ_BtoS $Acc, $MulLHS, $MulRHS)>;
def : Pat<(nxv2i64 (partial_reduce_umla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
- (UDOT_ZZZ_D $Acc, $MulLHS, $MulRHS)>;
+ (UDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>;
def : Pat<(nxv2i64 (partial_reduce_smla nxv2i64:$Acc, nxv8i16:$MulLHS, nxv8i16:$MulRHS)),
- (SDOT_ZZZ_D $Acc, $MulLHS, $MulRHS)>;
+ (SDOT_ZZZ_HtoD $Acc, $MulLHS, $MulRHS)>;
} // End HasSVE_or_SME
defm SDOT_ZZZI : sve_intx_dot_by_indexed_elem<0b0, "sdot", int_aarch64_sve_sdot_lane>;
@@ -2541,7 +2541,7 @@ let Predicates = [HasBF16, HasSVE_or_SME] in {
} // End HasBF16, HasSVE_or_SME
let Predicates = [HasBF16, HasSVE] in {
- defm BFMMLA_ZZZ : sve_fp_matrix_mla<0b01, "bfmmla", ZPR32, ZPR16, int_aarch64_sve_bfmmla, nxv4f32, nxv8bf16>;
+ defm BFMMLA_ZZZ_HtoS : sve_fp_matrix_mla<0b01, "bfmmla", ZPR32, ZPR16, int_aarch64_sve_bfmmla, nxv4f32, nxv8bf16>;
} // End HasBF16, HasSVE
let Predicates = [HasBF16, HasSVE_or_SME] in {
diff --git a/llvm/lib/Target/AArch64/AArch64SchedA320.td b/llvm/lib/Target/AArch64/AArch64SchedA320.td
index 5ec95c707c28f..2c193e59cc417 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA320.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA320.td
@@ -826,13 +826,13 @@ def : InstRW<[CortexA320MCWrite<15, 12, CortexA320UnitVMC>], (instregex "^[SU]DI
def : InstRW<[CortexA320MCWrite<26, 23, CortexA320UnitVMC>], (instregex "^[SU]DIVR?_(ZPmZ|ZPZZ)_D")>;
// Dot product, 8 bit
-def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_S")>;
+def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_BtoS")>;
// Dot product, 8 bit, using signed and unsigned integers
def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>;
// Dot product, 16 bit
-def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_D")>;
+def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_HtoD")>;
// Duplicate, immediate and indexed form
def : InstRW<[CortexA320Write<3, CortexA320UnitVALU>], (instregex "^DUP_ZI_[BHSD]",
@@ -1182,7 +1182,7 @@ def : InstRW<[CortexA320Write<4, CortexA320UnitVALU>], (instrs BFCVT_ZPmZ, BFCVT
def : InstRW<[CortexA320Write_11cyc_1VMAC_1VALU], (instrs BFDOT_ZZI, BFDOT_ZZZ)>;
// Matrix multiply accumulate
-def : InstRW<[CortexA320Write_16cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ)>;
+def : InstRW<[CortexA320Write_16cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ_HtoS)>;
// Multiply accumulate long
def : InstRW<[CortexA320Write<4, CortexA320UnitVMAC>], (instregex "^BFMLAL[BT]_ZZZ(I)?")>;
diff --git a/llvm/lib/Target/AArch64/AArch64SchedA510.td b/llvm/lib/Target/AArch64/AArch64SchedA510.td
index 356e3fa39c53f..66f49f040ad12 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedA510.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedA510.td
@@ -804,13 +804,13 @@ def : InstRW<[CortexA510MCWrite<15, 12, CortexA510UnitVMC>], (instregex "^[SU]DI
def : InstRW<[CortexA510MCWrite<26, 23, CortexA510UnitVMC>], (instregex "^[SU]DIVR?_(ZPmZ|ZPZZ)_D")>;
// Dot product, 8 bit
-def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_S")>;
+def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_BtoS")>;
// Dot product, 8 bit, using signed and unsigned integers
def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>;
// Dot product, 16 bit
-def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_D")>;
+def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^[SU]DOT_ZZZI?_HtoD")>;
// Duplicate, immediate and indexed form
def : InstRW<[CortexA510Write<3, CortexA510UnitVALU>], (instregex "^DUP_ZI_[BHSD]",
@@ -1160,7 +1160,7 @@ def : InstRW<[CortexA510Write<4, CortexA510UnitVALU>], (instrs BFCVT_ZPmZ, BFCVT
def : InstRW<[A510Write_10cyc_1VMAC_1VALU], (instrs BFDOT_ZZI, BFDOT_ZZZ)>;
// Matrix multiply accumulate
-def : InstRW<[A510Write_15cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ)>;
+def : InstRW<[A510Write_15cyc_1VMAC_1VALU], (instrs BFMMLA_ZZZ_HtoS)>;
// Multiply accumulate long
def : InstRW<[CortexA510Write<4, CortexA510UnitVMAC>], (instregex "^BFMLAL[BT]_ZZZ(I)?")>;
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
index e7982226ff3d1..50f10114989d0 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
@@ -1764,13 +1764,13 @@ def : InstRW<[N2Write_20c_1V0], (instregex "^[SU]DIVR?_ZPmZ_D",
"^[SU]DIV_ZPZZ_D")>;
// Dot product, 8 bit
-def : InstRW<[N2Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_S$")>;
+def : InstRW<[N2Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_BtoS$")>;
// Dot product, 8 bit, using signed and unsigned integers
def : InstRW<[N2Write_3c_1V], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>;
// Dot product, 16 bit
-def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>;
+def : InstRW<[N2Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_HtoD$")>;
// Duplicate, immediate and indexed form
def : InstRW<[N2Write_2c_1V], (instregex "^DUP_ZI_[BHSD]$",
@@ -2118,7 +2118,7 @@ def : InstRW<[N2Write_3c_1V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>;
def : InstRW<[N2Write_4c_1V], (instrs BFDOT_ZZI, BFDOT_ZZZ)>;
// Matrix multiply accumulate
-def : InstRW<[N2Write_5c_1V], (instrs BFMMLA_ZZZ)>;
+def : InstRW<[N2Write_5c_1V], (instrs BFMMLA_ZZZ_HtoS)>;
// Multiply accumulate long
def : InstRW<[N2Write_4c_1V], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>;
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td
index cd0d8a9186d5b..411b372a3f533 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN3.td
@@ -1736,13 +1736,13 @@ def : InstRW<[N3Write_16c_16V0], (instregex "^[SU]DIVR?_ZPmZ_D",
"^[SU]DIV_ZPZZ_D")>;
// Dot product, 8 bit
-def : InstRW<[N3Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_S$")>;
+def : InstRW<[N3Write_3c_1V], (instregex "^[SU]DOT_ZZZI?_BtoS$")>;
// Dot product, 8 bit, using signed and unsigned integers
def : InstRW<[N3Write_3c_1V], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>;
// Dot product, 16 bit
-def : InstRW<[N3Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_D$")>;
+def : InstRW<[N3Write_4c_1V0], (instregex "^[SU]DOT_ZZZI?_HtoD$")>;
// Duplicate, immediate and indexed form
def : InstRW<[N3Write_2c_1V], (instregex "^DUP_ZI_[BHSD]$",
@@ -2082,7 +2082,7 @@ def : InstRW<[N3Write_4c_2V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>;
def : InstRW<[N3Write_4c_1V], (instrs BFDOT_ZZI, BFDOT_ZZZ)>;
// Matrix multiply accumulate
-def : InstRW<[N3Write_5c_1V], (instrs BFMMLA_ZZZ)>;
+def : InstRW<[N3Write_5c_1V], (instrs BFMMLA_ZZZ_HtoS)>;
// Multiply accumulate long
def : InstRW<[N3Write_4c_1V], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>;
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
index f28df44bfdb38..3cbfc59423c9a 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV1.td
@@ -1555,14 +1555,14 @@ def : InstRW<[V1Write_20c7_1V0], (instregex "^[SU]DIVR?_ZPmZ_D",
"^[SU]DIV_ZPZZ_D")>;
// Dot product, 8 bit
-def : InstRW<[V1Wr_ZDOTB, V1Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_S$")>;
+def : InstRW<[V1Wr_ZDOTB, V1Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_BtoS$")>;
// Dot product, 8 bit, using signed and unsigned integers
def : InstRW<[V1Wr_ZUDOTB, V1Rd_ZUDOTB],
(instrs SUDOT_ZZZI, USDOT_ZZZ, USDOT_ZZZI)>;
// Dot product, 16 bit
-def : InstRW<[V1Wr_ZDOTH, V1Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_D$")>;
+def : InstRW<[V1Wr_ZDOTH, V1Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_HtoD$")>;
// Duplicate, immediate and indexed form
def : InstRW<[V1Write_2c_1V01], (instregex "^DUP_ZI_[BHSD]$",
@@ -1808,7 +1808,7 @@ def : InstRW<[V1Write_4c_1V0], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>;
def : InstRW<[V1Wr_ZBFDOT, V1Rd_ZBFDOT], (instrs BFDOT_ZZI, BFDOT_ZZZ)>;
// Matrix multiply accumulate
-def : InstRW<[V1Wr_ZBFMMA, V1Rd_ZBFMMA], (instrs BFMMLA_ZZZ)>;
+def : InstRW<[V1Wr_ZBFMMA, V1Rd_ZBFMMA], (instrs BFMMLA_ZZZ_HtoS)>;
// Multiply accumulate long
def : InstRW<[V1Wr_ZBFMAL, V1Rd_ZBFMAL], (instregex "^BFMLAL[BT]_ZZZ(I)?$")>;
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
index 6261220082029..bdde8e388cccc 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseV2.td
@@ -2251,13 +2251,13 @@ def : InstRW<[V2Write_20c_1V0], (instregex "^[SU]DIVR?_ZPmZ_D",
"^[SU]DIV_ZPZZ_D")>;
// Dot product, 8 bit
-def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_S")>;
+def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instregex "^[SU]DOT_ZZZI?_BtoS")>;
// Dot product, 8 bit, using signed and unsigned integers
def : InstRW<[V2Wr_ZDOTB, V2Rd_ZDOTB], (instrs SUDOT_ZZZI, USDOT_ZZZI, USDOT_ZZZ)>;
// Dot product, 16 bit
-def : InstRW<[V2Wr_ZDOTH, V2Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_D")>;
+def : InstRW<[V2Wr_ZDOTH, V2Rd_ZDOTH], (instregex "^[SU]DOT_ZZZI?_HtoD")>;
// Duplicate, immediate and indexed form
def : InstRW<[V2Write_2c_1V], (instregex "^DUP_ZI_[BHSD]",
@@ -2614,7 +2614,7 @@ def : InstRW<[V2Write_4c_1V02], (instrs BFCVT_ZPmZ, BFCVTNT_ZPmZ)>;
def : InstRW<[V2Wr_ZBFDOT, V2Rd_ZBFDOT], (instrs BFDOT_ZZI, BFDOT_ZZZ)>;
// Matrix multiply accumulate
-def : InstRW<[V2Wr_ZBFMMA, V2Rd_ZBFMMA], (instrs BFMMLA_ZZZ)>;
+def : InstRW<[V2Wr_ZBFMMA, V2Rd_ZBFMMA], (instrs BFMMLA_ZZZ_HtoS)>;
// Multiply accumulate long
def : InstRW<[V2Wr_ZBFMAL, V2Rd_ZBFMAL], (instregex "^BFMLAL[BT]_ZZZI?")>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index 7913e8ca8652e..000532ec23916 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -3770,11 +3770,11 @@ class sve_intx_dot<bit sz, bit U, string asm, ZPRRegOp zprty1,
}
multiclass sve_intx_dot<bit opc, string asm, SDPatternOperator op> {
- def _S : sve_intx_dot<0b0, opc, asm, ZPR32, ZPR8>;
- def _D : sve_intx_dot<0b1, opc, asm, ZPR64, ZPR16>;
+ def _BtoS : sve_intx_dot<0b0, opc, asm, ZPR32, ZPR8>;
+ def _HtoD : sve_intx_dot<0b1, opc, asm, ZPR64, ZPR16>;
- def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _S)>;
- def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _D)>;
+ def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv16i8, nxv16i8, !cast<Instruction>(NAME # _BtoS)>;
+ def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv8i16, nxv8i16, !cast<Instruction>(NAME # _HtoD)>;
}
//===----------------------------------------------------------------------===//
@@ -3804,21 +3804,21 @@ class sve_intx_dot_by_indexed_elem<bit sz, bit U, string asm,
multiclass sve_intx_dot_by_indexed_elem<bit opc, string asm,
SDPatternOperator op> {
- def _S : sve_intx_dot_by_indexed_elem<0b0, opc, asm, ZPR32, ZPR8, ZPR3b8, VectorIndexS32b_timm> {
+ def _BtoS : sve_intx_dot_by_indexed_elem<0b0, opc, asm, ZPR32, ZPR8, ZPR3b8, VectorIndexS32b_timm> {
bits<2> iop;
bits<3> Zm;
let Inst{20-19} = iop;
let Inst{18-16} = Zm;
}
- def _D : sve_intx_dot_by_indexed_elem<0b1, opc, asm, ZPR64, ZPR16, ZPR4b16, VectorIndexD32b_timm> {
+ def _HtoD : sve_intx_dot_by_indexed_elem<0b1, opc, asm, ZPR64, ZPR16, ZPR4b16, VectorIndexD32b_timm> {
bits<1> iop;
bits<4> Zm;
let Inst{20} = iop;
let Inst{19-16} = Zm;
}
- def : SVE_4_Op_Imm_Pat<nxv4i32, op, nxv4i32, nxv16i8, nxv16i8, i32, VectorIndexS32b_timm, !cast<Instruction>(NAME # _S)>;
- def : SVE_4_Op_Imm_Pat<nxv2i64, op, nxv2i64, nxv8i16, nxv8i16, i32, VectorIndexD32b_timm, !cast<Instruction>(NAME # _D)>;
+ def : SVE_4_Op_Imm_Pat<nxv4i32, op, nxv4i32, nxv16i8, nxv16i8, i32, VectorIndexS32b_timm, !cast<Instruction>(NAME # _BtoS)>;
+ def : SVE_4_Op_Imm_Pat<nxv2i64, op, nxv2i64, nxv8i16, nxv8i16, i32, VectorIndexD32b_timm, !cast<Instruction>(NAME # _HtoD)>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
index 911ad1900195c..fe3742c9e4d3b 100644
--- a/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
+++ b/llvm/test/tools/llvm-mca/AArch64/Neoverse/V1-sve-instructions.s
@@ -2649,7 +2649,7 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 5 0.50 2 V1UnitV,V1UnitV01 BFMLALT_ZZZI bfmlalt z0.s, z1.h, z2.h[7]
# CHECK-NEXT: 1 5 0.50 2 V1UnitV,V1UnitV01 BFMLALT_ZZZI bfmlalt z0.s, z1.h, z7.h[7]
# CHECK-NEXT: 1 5 0.50 2 V1UnitV,V1UnitV01 BFMLALT_ZZZ bfmlalt z14.s, z10.h, z21.h
-# CHECK-NEXT: 1 5 0.50 3 V1UnitV,V1UnitV01 BFMMLA_ZZZ bfmmla z0.s, z1.h, z2.h
+# CHECK-NEXT: 1 5 0.50 3 V1UnitV,V1UnitV01 BFMMLA_ZZZ_HtoS bfmmla z0.s, z1.h, z2.h
# CHECK-NEXT: 1 1 1.00 1 V1UnitI,V1UnitM,V1UnitM0 BIC_PPzPP bic p0.b, p0/z, p0.b, p0.b
# CHECK-NEXT: 1 1 1.00 1 V1UnitI,V1UnitM,V1UnitM0 BIC_PPzPP bic p15.b, p15/z, p15.b, p15.b
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 BIC_ZZZ bic z0.d, z0.d, z0.d
@@ -4228,10 +4228,10 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] SDIV_ZPmZ_S sdiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: 1 20 7.00 20 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] SDIVR_ZPmZ_D sdivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] SDIVR_ZPmZ_S sdivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZI_D sdot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZ_D sdot z0.d, z1.h, z31.h
-# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZ_S sdot z0.s, z1.b, z31.b
-# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZI_S sdot z0.s, z1.b, z7.b[3]
+# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZI_HtoD sdot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 SDOT_ZZZ_HtoD sdot z0.d, z1.h, z31.h
+# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZ_BtoS sdot z0.s, z1.b, z31.b
+# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 SDOT_ZZZI_BtoS sdot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 SEL_ZPZZ_B sel z23.b, p11, z13.b, z8.b
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 SEL_ZPZZ_D sel z23.d, p11, z13.d, z8.d
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 SEL_ZPZZ_H sel z23.h, p11, z13.h, z8.h
@@ -4708,11 +4708,11 @@ zip2 z31.s, z31.s, z31.s
# CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] UDIV_ZPmZ_S udiv z0.s, p7/m, z0.s, z31.s
# CHECK-NEXT: 1 20 7.00 20 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] UDIVR_ZPmZ_D udivr z0.d, p7/m, z0.d, z31.d
# CHECK-NEXT: 1 12 7.00 12 V1UnitV[7],V1UnitV0[7],V1UnitV01[7],V1UnitV02[7] UDIVR_ZPmZ_S udivr z0.s, p7/m, z0.s, z31.s
-# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZI_D udot z0.d, z1.h, z15.h[1]
-# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZ_D udot z0.d, z1.h, z31.h
+# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZI_HtoD udot z0.d, z1.h, z15.h[1]
+# CHECK-NEXT: 1 4 1.00 1 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UDOT_ZZZ_HtoD udot z0.d, z1.h, z31.h
# CHECK-NEXT: 1 3 1.00 3 V1UnitV,V1UnitV0,V1UnitV01,V1UnitV02 UCVTF_ZPmZ_StoD ucvtf z24.d, p5/m, z9.s
-# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZ_S udot z0.s, z1.b, z31.b
-# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZI_S udot z0.s, z1.b, z7.b[3]
+# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZ_BtoS udot z0.s, z1.b, z31.b
+# CHECK-NEXT: 1 3 0.50 1 V1UnitV,V1UnitV01 UDOT_ZZZI_BtoS udot z0.s, z1.b, z7.b[3]
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 UMAX_ZI_B umax z0.b, z0.b, #0
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 UMAX_ZPmZ_B umax z31.b, p7/m, z31.b, z31.b
# CHECK-NEXT: 1 2 0.50 2 V1UnitV,V1UnitV01 UMAX_ZI_B umax z31.b, z31.b, #255
>From 9811ce3572b2367e2a18d0a813d8c3cf077ddffc Mon Sep 17 00:00:00 2001
From: Ramkumar Ramachandra <ramkumar.ramachandra at codasip.com>
Date: Wed, 24 Sep 2025 11:33:24 +0100
Subject: [PATCH 21/35] [LV] Don't ignore invariant stores when costing
(#158682)
Invariant stores of reductions are removed early during VPlan
construction, so there is no reason to ignore them when costing.
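To make the wording concrete, here is a minimal sketch of the kind of loop
this affects (not taken from the patch; the function and value names are
illustrative): a reduction whose running value is stored to a loop-invariant
address on every iteration, so only the store from the final iteration is
observable and the store is sunk out of the loop during vectorization.
define void @store_reduction(ptr noalias %dst, ptr noalias %src, i64 %n) {
entry:
  br label %loop
loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi i64 [ 0, %entry ], [ %red.next, %loop ]
  %gep = getelementptr inbounds i64, ptr %src, i64 %iv
  %val = load i64, ptr %gep, align 8
  %red.next = add i64 %red, %val
  ; %dst does not depend on %iv, so this is an invariant store of the
  ; reduction; its cost is no longer ignored up front by the cost model.
  store i64 %red.next, ptr %dst, align 8
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %n
  br i1 %ec, label %exit, label %loop
exit:
  ret void
}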
---
.../Transforms/Vectorize/LoopVectorize.cpp | 14 ------------
.../X86/invariant-store-vectorization.ll | 22 +++++++++++++++----
2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 30fcc9b7680ed..ae9e70197d2cc 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -6381,19 +6381,8 @@ void LoopVectorizationCostModel::collectValuesToIgnore() {
LoopBlocksDFS DFS(TheLoop);
DFS.perform(LI);
- MapVector<Value *, SmallVector<Value *>> DeadInvariantStoreOps;
for (BasicBlock *BB : reverse(make_range(DFS.beginRPO(), DFS.endRPO())))
for (Instruction &I : reverse(*BB)) {
- // Find all stores to invariant variables. Since they are going to sink
- // outside the loop we do not need calculate cost for them.
- StoreInst *SI;
- if ((SI = dyn_cast<StoreInst>(&I)) &&
- Legal->isInvariantAddressOfReduction(SI->getPointerOperand())) {
- ValuesToIgnore.insert(&I);
- DeadInvariantStoreOps[SI->getPointerOperand()].push_back(
- SI->getValueOperand());
- }
-
if (VecValuesToIgnore.contains(&I) || ValuesToIgnore.contains(&I))
continue;
@@ -6440,9 +6429,6 @@ void LoopVectorizationCostModel::collectValuesToIgnore() {
append_range(DeadInterleavePointerOps, Op->operands());
}
- for (const auto &[_, Ops] : DeadInvariantStoreOps)
- llvm::append_range(DeadOps, drop_end(Ops));
-
// Mark ops that would be trivially dead and are only used by ignored
// instructions as free.
BasicBlock *Header = TheLoop->getHeader();
diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
index 199f1c15fbc3d..2f74049968544 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
@@ -398,16 +398,30 @@ for.end: ; preds = %for.body
define void @test_store_of_final_reduction_value(i64 %x, ptr %dst) {
; CHECK-LABEL: define void @test_store_of_final_reduction_value(
; CHECK-SAME: i64 [[X:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[X]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[TMP0:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0]] = mul <2 x i64> [[VEC_PHI]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> [[TMP0]])
+; CHECK-NEXT: store i64 [[TMP1]], ptr [[DST]], align 8
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV4:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV4:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED_NEXT]] = mul i64 [[RED]], [[X]]
; CHECK-NEXT: store i64 [[RED_NEXT]], ptr [[DST]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV4]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV4]], 1
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP33:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
>From 576d621432189bfbd509ae0a2f53c90c0e402afa Mon Sep 17 00:00:00 2001
From: Matthew Devereau <matthew.devereau at arm.com>
Date: Wed, 24 Sep 2025 11:35:15 +0100
Subject: [PATCH 22/35] [InstSimplify] Consider vscale_range for get active
lane mask (#160073)
Scalable get_active_lane_mask intrinsic calls can be simplified to an i1
splat (ptrue) when the base operand is zero and the constant trip-count
operand is greater than or equal to the maximum possible number of
elements, which can be inferred from the vscale_range(x, y) attribute.
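As a rough sketch of the new fold (not one of the tests below; the function
name is illustrative): with vscale_range(1,16), a <vscale x 4 x i1> mask can
cover at most 4 * 16 = 64 lanes, so a mask starting at 0 with a limit of at
least 64 is known to be all true.
declare <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64, i64)
define <vscale x 4 x i1> @mask_known_all_true() vscale_range(1,16) {
  ; The limit (64) is >= 4 (min elements) * 16 (max vscale), the maximum
  ; possible number of lanes, so InstSimplify can fold this call to
  ; splat (i1 true).
  %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 64)
  ret <vscale x 4 x i1> %mask
}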
---
llvm/lib/Analysis/InstructionSimplify.cpp | 19 +++++++-
.../InstSimplify/get_active_lane_mask.ll | 48 +++++++++++++++++++
.../LoopVectorize/AArch64/masked-call.ll | 36 +++++---------
.../LoopVectorize/AArch64/sve-vfabi.ll | 46 +++++++++---------
4 files changed, 100 insertions(+), 49 deletions(-)
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index a90b618607ad6..07f4a8e5c889e 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -6514,10 +6514,27 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
const CallBase *Call) {
unsigned BitWidth = ReturnType->getScalarSizeInBits();
switch (IID) {
- case Intrinsic::get_active_lane_mask:
+ case Intrinsic::get_active_lane_mask: {
if (match(Op1, m_Zero()))
return ConstantInt::getFalse(ReturnType);
+
+ const Function *F = Call->getFunction();
+ auto *ScalableTy = dyn_cast<ScalableVectorType>(ReturnType);
+ Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
+ if (ScalableTy && Attr.isValid()) {
+ std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
+ if (!VScaleMax)
+ break;
+ uint64_t MaxPossibleMaskElements =
+ (uint64_t)ScalableTy->getMinNumElements() * (*VScaleMax);
+
+ const APInt *Op1Val;
+ if (match(Op0, m_Zero()) && match(Op1, m_APInt(Op1Val)) &&
+ Op1Val->uge(MaxPossibleMaskElements))
+ return ConstantInt::getAllOnesValue(ReturnType);
+ }
break;
+ }
case Intrinsic::abs:
// abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
// It is always ok to pick the earlier abs. We'll just lose nsw if its only
diff --git a/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll b/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll
index a3b8e4efbe939..180012a4e8211 100644
--- a/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll
+++ b/llvm/test/Transforms/InstSimplify/get_active_lane_mask.ll
@@ -18,3 +18,51 @@ define <vscale x 8 x i1> @foo_nxv8i1(i32 %a) {
%mask = call <vscale x 8 x i1> @llvm.get.active.lane.mask.nxv8i1(i32 %a, i32 0)
ret <vscale x 8 x i1> %mask
}
+
+define <vscale x 16 x i1> @foo_vscale_max_255() vscale_range(1,16) {
+; CHECK-LABEL: define <vscale x 16 x i1> @foo_vscale_max_255(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 255)
+; CHECK-NEXT: ret <vscale x 16 x i1> [[MASK]]
+;
+ %mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1(i32 0, i32 255)
+ ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 16 x i1> @foo_vscale_max_256() vscale_range(1,16) {
+; CHECK-LABEL: define <vscale x 16 x i1> @foo_vscale_max_256(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: ret <vscale x 16 x i1> splat (i1 true)
+;
+ %mask = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1(i32 0, i32 256)
+ ret <vscale x 16 x i1> %mask
+}
+
+define <vscale x 2 x i1> @foo_vscale_max_nxv2i1_1_1_2() vscale_range(1,1) {
+; CHECK-LABEL: define <vscale x 2 x i1> @foo_vscale_max_nxv2i1_1_1_2(
+; CHECK-SAME: ) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: ret <vscale x 2 x i1> splat (i1 true)
+;
+ %mask = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1(i32 0, i32 2)
+ ret <vscale x 2 x i1> %mask
+}
+
+define <vscale x 4 x i1> @foo_vscale_max_nxv4i1_2_4_16() vscale_range(2,4) {
+; CHECK-LABEL: define <vscale x 4 x i1> @foo_vscale_max_nxv4i1_2_4_16(
+; CHECK-SAME: ) #[[ATTR2:[0-9]+]] {
+; CHECK-NEXT: ret <vscale x 4 x i1> splat (i1 true)
+;
+ %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1(i128 0, i128 16)
+ ret <vscale x 4 x i1> %mask
+}
+
+define <vscale x 4 x i1> @foo_vscale_max_nxv4i1_2_4_1_16() vscale_range(2,4) {
+; CHECK-LABEL: define <vscale x 4 x i1> @foo_vscale_max_nxv4i1_2_4_1_16(
+; CHECK-SAME: ) #[[ATTR2]] {
+; CHECK-NEXT: [[MASK:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i128(i128 1, i128 16)
+; CHECK-NEXT: ret <vscale x 4 x i1> [[MASK]]
+;
+ %mask = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1(i128 1, i128 16)
+ ret <vscale x 4 x i1> %mask
+}
+
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index fd02300232a84..4a1d56572c013 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -51,11 +51,10 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFCOMMON-NEXT: [[ENTRY:.*]]:
; TFCOMMON-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFCOMMON-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFCOMMON-NEXT: br label %[[VECTOR_BODY:.*]]
; TFCOMMON: [[VECTOR_BODY]]:
; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFCOMMON-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; TFCOMMON-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
@@ -76,12 +75,11 @@ define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFA_INTERLEAVE: [[VECTOR_BODY]]:
; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
@@ -179,11 +177,10 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFCOMMON-NEXT: [[ENTRY:.*]]:
; TFCOMMON-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFCOMMON-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFCOMMON-NEXT: br label %[[VECTOR_BODY:.*]]
; TFCOMMON: [[VECTOR_BODY]]:
; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFCOMMON-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; TFCOMMON-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], splat (i64 50)
@@ -207,12 +204,11 @@ define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFA_INTERLEAVE: [[VECTOR_BODY]]:
; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
@@ -332,11 +328,10 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFCOMMON-NEXT: [[ENTRY:.*]]:
; TFCOMMON-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFCOMMON-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFCOMMON-NEXT: br label %[[VECTOR_BODY:.*]]
; TFCOMMON: [[VECTOR_BODY]]:
; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFCOMMON-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; TFCOMMON-NEXT: [[TMP6:%.*]] = icmp ugt <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], splat (i64 50)
@@ -363,12 +358,11 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFA_INTERLEAVE: [[VECTOR_BODY]]:
; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
@@ -608,11 +602,10 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFALWAYS-NEXT: [[ENTRY:.*]]:
; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFALWAYS-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFALWAYS-NEXT: br label %[[VECTOR_BODY:.*]]
; TFALWAYS: [[VECTOR_BODY]]:
; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFALWAYS-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; TFALWAYS-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
@@ -631,11 +624,10 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFFALLBACK-NEXT: [[ENTRY:.*]]:
; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFFALLBACK-NEXT: br label %[[VECTOR_BODY:.*]]
; TFFALLBACK: [[VECTOR_BODY]]:
; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFFALLBACK-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i64> poison)
; TFFALLBACK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
@@ -656,12 +648,11 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025)
; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFA_INTERLEAVE: [[VECTOR_BODY]]:
; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
@@ -763,13 +754,12 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFALWAYS-NEXT: [[ENTRY:.*]]:
; TFALWAYS-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFALWAYS-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFALWAYS-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[M]], i64 0
; TFALWAYS-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; TFALWAYS-NEXT: br label %[[VECTOR_BODY:.*]]
; TFALWAYS: [[VECTOR_BODY]]:
; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFALWAYS-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
; TFALWAYS-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
@@ -793,13 +783,12 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFFALLBACK-NEXT: [[ENTRY:.*]]:
; TFFALLBACK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; TFFALLBACK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
-; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFFALLBACK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[M]], i64 0
; TFFALLBACK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; TFFALLBACK-NEXT: br label %[[VECTOR_BODY:.*]]
; TFFALLBACK: [[VECTOR_BODY]]:
; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFFALLBACK-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP11:%.*]], %[[VECTOR_BODY]] ]
; TFFALLBACK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
@@ -825,14 +814,13 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, doub
; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = shl nuw i64 [[TMP5]], 1
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP6]], i64 1025)
; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[M]], i64 0
; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[BROADCAST_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; TFA_INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
; TFA_INTERLEAVE: [[VECTOR_BODY]]:
; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], %[[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, %[[ENTRY]] ], [ [[TMP24:%.*]], %[[VECTOR_BODY]] ]
; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll
index c3ace983fd911..7628b39cf4eb7 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vfabi.ll
@@ -7,21 +7,20 @@ define void @test_big_little_params(ptr readonly %a, ptr readonly %b, ptr noalia
; CHECK-LABEL: define void @test_big_little_params
; CHECK-SAME: (ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 1025)
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP0]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i8> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @foo_vector(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i8> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP2]], ptr [[TMP3]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 4 x i1> [ splat (i1 true), [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i32, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP2]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i32> poison)
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i8> poison)
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 4 x i32> @foo_vector(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]], <vscale x 4 x i8> [[WIDE_MASKED_LOAD1]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP4]], ptr [[TMP5]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY]], label [[EXIT:%.*]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -52,21 +51,20 @@ define void @test_little_big_params(ptr readonly %a, ptr readonly %b, ptr noalia
; CHECK-LABEL: define void @test_little_big_params
; CHECK-SAME: (ptr readonly [[A:%.*]], ptr readonly [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 1
-; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025)
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x float> @llvm.masked.load.nxv2f32.p0(ptr [[TMP0]], i32 4, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x float> poison)
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
-; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP1]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x double> @bar_vector(<vscale x 2 x float> [[WIDE_MASKED_LOAD]], <vscale x 2 x double> [[WIDE_MASKED_LOAD1]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP2]], ptr [[TMP3]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
-; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ splat (i1 true), [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x float> @llvm.masked.load.nxv2f32.p0(ptr [[TMP2]], i32 4, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x float> poison)
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[WIDE_MASKED_LOAD1:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison)
+; CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 2 x double> @bar_vector(<vscale x 2 x float> [[WIDE_MASKED_LOAD]], <vscale x 2 x double> [[WIDE_MASKED_LOAD1]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[C]], i64 [[INDEX]]
+; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP4]], ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0
; CHECK-NEXT: br i1 [[TMP6]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
>From 7e6fd4bb902dd69a5c1eead3d71c79bec871ed52 Mon Sep 17 00:00:00 2001
From: Pradeep Kumar <pradeepku at nvidia.com>
Date: Wed, 24 Sep 2025 16:06:27 +0530
Subject: [PATCH 23/35] [LLVM][NVPTX-Tests] Fix tcgen05.mma lit tests committed
in 7be3c3a (#160464)
This commit fixes the ptxas ISA version mismatch introduced in 7be3c3aa4fb311c0c84d9d1321d5b1dca1e03ad7. Locally verified with the CUDA 12.8 and 12.9 toolkits.
---
llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll | 4 ++--
llvm/test/CodeGen/NVPTX/tcgen05-mma.ll | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll
index 6d8e71fa31045..f6c219107a677 100644
--- a/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma-block-scale-ptx88.ll
@@ -2,8 +2,8 @@
; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx88 | FileCheck %s
; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx88 | FileCheck %s
; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s
-; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
-; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
+; RUN: %if ptxas-sm_100a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-sm_101a && ptxas-isa-8.8 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %}
define void @tcgen05_mma_mxf8f6f4_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d, ptr addrspace(6) %scale_a, ptr addrspace(6) %scale_b) {
diff --git a/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll b/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll
index 5f59636d3e321..711e566df5034 100644
--- a/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll
+++ b/llvm/test/CodeGen/NVPTX/tcgen05-mma.ll
@@ -2,8 +2,8 @@
; RUN: llc < %s -o - -mcpu=sm_100a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
; RUN: llc < %s -o - -mcpu=sm_101a -march=nvptx64 -mattr=+ptx86 | FileCheck %s
; RUN: llc < %s -o - -mcpu=sm_110a -march=nvptx64 -mattr=+ptx90 | FileCheck %s
-; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
-; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx88 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
+; RUN: %if ptxas-sm_100a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_100a | %ptxas-verify -arch=sm_100a %}
+; RUN: %if ptxas-sm_101a && ptxas-isa-8.6 %{ llc < %s -march=nvptx64 -mattr=+ptx86 -mcpu=sm_101a | %ptxas-verify -arch=sm_101a %}
; RUN: %if ptxas-sm_110a && ptxas-isa-9.0 %{ llc < %s -march=nvptx64 -mattr=+ptx90 -mcpu=sm_110a | %ptxas-verify -arch=sm_110a %}
define void @tcgen05_mma_fp16_cta1(ptr addrspace(6) %dtmem, ptr addrspace(6) %atensor, i64 %ashared, i64 %b, i32 %idesc, i1 %enable_inp_d) {
>From 260ffa4d3fdbf6f915d4bdbeab555d9618aa4da8 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Wed, 24 Sep 2025 12:37:18 +0200
Subject: [PATCH 24/35] [clang][bytecode][test] Disable a bitcast test on s390x
(#160452)
Fixes #160374
---
clang/test/AST/ByteCode/builtin-bit-cast.cpp | 3 +++
1 file changed, 3 insertions(+)
diff --git a/clang/test/AST/ByteCode/builtin-bit-cast.cpp b/clang/test/AST/ByteCode/builtin-bit-cast.cpp
index a12f305caf877..c1d29b2ca4c00 100644
--- a/clang/test/AST/ByteCode/builtin-bit-cast.cpp
+++ b/clang/test/AST/ByteCode/builtin-bit-cast.cpp
@@ -556,6 +556,8 @@ namespace VectorCast {
}
static_assert(test2() == 0);
+ /// On s390x, S is only 8 bytes.
+#if !defined(__s390x__)
struct S {
unsigned __int128 a : 3;
};
@@ -569,6 +571,7 @@ namespace VectorCast {
static_assert(s.a == 0); // ref-error {{not an integral constant expression}} \
// ref-note {{initializer of 's' is not a constant expression}}
#endif
+#endif
}
#endif
>From ff92b6b413e327b400a17aeaeaf0923b62e75945 Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 24 Sep 2025 12:37:45 +0200
Subject: [PATCH 25/35] [LangRef] Do not make align imply dereferenceability
(#158062)
We currently specify that something like `load i8, align 16384` implies
that the object is actually dereferenceable up to 16384 bytes, rather
than only the one byte implied by the load type.
We should stop doing that, because it makes it invalid to infer
alignments larger than the size of the load/store type, which is
something we do (and want to do).
There is some SDAG code that does make use of this property by
widening accesses and extracting part of them. However, I believe we
should be justifying that based on target-specific guarantees, rather
than a generic IR property. (The reasoning goes something like this:
Typically, memory protection has page granularity, so widening a load to
the alignment will not trap, as long as the alignment is not larger than
the page size, which is true for any practically interesting access
size.)
Fixes https://github.com/llvm/llvm-project/issues/90446.
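To make the semantic change concrete, here is a small, hypothetical IR sketch
(not taken from the patch): the large `align` on the load now only describes
alignment, not how many bytes are dereferenceable.

  ; Hypothetical example: %p is known to be 16384-byte aligned.
  define i8 @aligned_byte_load(ptr %p) {
    ; With the updated wording, 'align 16384' no longer implies that
    ; 16384 bytes starting at %p can be loaded without trapping; only
    ; the single i8 implied by the load type is known dereferenceable.
    %v = load i8, ptr %p, align 16384
    ret i8 %v
  }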
---
llvm/docs/LangRef.rst | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index b32a27f9555fd..2fe3611be80b0 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -11397,11 +11397,9 @@ responsibility of the code emitter to ensure that the alignment information is
correct. Overestimating the alignment results in undefined behavior.
Underestimating the alignment may produce less efficient code. An alignment of
1 is always safe. The maximum possible alignment is ``1 << 32``. An alignment
-value higher than the size of the loaded type implies memory up to the
-alignment value bytes can be safely loaded without trapping in the default
-address space. Access of the high bytes can interfere with debugging tools, so
-should not be accessed if the function has the ``sanitize_thread`` or
-``sanitize_address`` attributes.
+value higher than the size of the loaded type does *not* imply (without target
+specific knowledge) that memory up to the alignment value bytes can be safely
+loaded without trapping.
The alignment is only optional when parsing textual IR; for in-memory IR, it is
always present. An omitted ``align`` argument means that the operation has the
@@ -11537,12 +11535,10 @@ operation (that is, the alignment of the memory address). It is the
responsibility of the code emitter to ensure that the alignment information is
correct. Overestimating the alignment results in undefined behavior.
Underestimating the alignment may produce less efficient code. An alignment of
-1 is always safe. The maximum possible alignment is ``1 << 32``. An alignment
-value higher than the size of the loaded type implies memory up to the
-alignment value bytes can be safely loaded without trapping in the default
-address space. Access of the high bytes can interfere with debugging tools, so
-should not be accessed if the function has the ``sanitize_thread`` or
-``sanitize_address`` attributes.
+1 is always safe. The maximum possible alignment is ``1 << 32``. An alignment
+value higher than the size of the stored type does *not* imply (without target
+specific knowledge) that memory up to the alignment value bytes can be safely
+loaded without trapping.
The alignment is only optional when parsing textual IR; for in-memory IR, it is
always present. An omitted ``align`` argument means that the operation has the
>From 78aba4374cf65bf04a0cd8e7de0102eccce5c38b Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 24 Sep 2025 12:39:05 +0200
Subject: [PATCH 26/35] [AssumptionCache] Don't use ResultElem for assumption
list (NFC) (#160462)
ResultElem stores a weak handle of an assume, plus an index for
referring to a specific operand bundle. This makes sense for the results
of assumptionsFor(), which refers to specific operands of assumes.
However, assumptions() is a plain list of assumes. It does *not* contain
separate entries for each operand bundle. The operand bundle index is
always ExprResultIdx.
As such, we should be directly using WeakVH for this case, without the
additional wrapper.
---
llvm/include/llvm/Analysis/AssumptionCache.h | 4 ++--
llvm/lib/Analysis/AssumptionCache.cpp | 4 ++--
llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp | 4 ++--
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/include/llvm/Analysis/AssumptionCache.h b/llvm/include/llvm/Analysis/AssumptionCache.h
index 1b026ef76a45e..be33be3bf2e87 100644
--- a/llvm/include/llvm/Analysis/AssumptionCache.h
+++ b/llvm/include/llvm/Analysis/AssumptionCache.h
@@ -65,7 +65,7 @@ class AssumptionCache {
/// Vector of weak value handles to calls of the \@llvm.assume
/// intrinsic.
- SmallVector<ResultElem, 4> AssumeHandles;
+ SmallVector<WeakVH, 4> AssumeHandles;
class LLVM_ABI AffectedValueCallbackVH final : public CallbackVH {
AssumptionCache *AC;
@@ -148,7 +148,7 @@ class AssumptionCache {
/// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
/// when we can write that to filter out the null values. Then caller code
/// will become simpler.
- MutableArrayRef<ResultElem> assumptions() {
+ MutableArrayRef<WeakVH> assumptions() {
if (!Scanned)
scanFunction();
return AssumeHandles;
diff --git a/llvm/lib/Analysis/AssumptionCache.cpp b/llvm/lib/Analysis/AssumptionCache.cpp
index 980a891266e50..45ff9161db97c 100644
--- a/llvm/lib/Analysis/AssumptionCache.cpp
+++ b/llvm/lib/Analysis/AssumptionCache.cpp
@@ -172,7 +172,7 @@ void AssumptionCache::scanFunction() {
for (BasicBlock &B : F)
for (Instruction &I : B)
if (isa<AssumeInst>(&I))
- AssumeHandles.push_back({&I, ExprResultIdx});
+ AssumeHandles.push_back(&I);
// Mark the scan as complete.
Scanned = true;
@@ -188,7 +188,7 @@ void AssumptionCache::registerAssumption(AssumeInst *CI) {
if (!Scanned)
return;
- AssumeHandles.push_back({CI, ExprResultIdx});
+ AssumeHandles.push_back(CI);
#ifndef NDEBUG
assert(CI->getParent() &&
diff --git a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp
index c2e58ba393553..8b3bd50a7e53f 100644
--- a/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp
+++ b/llvm/lib/Transforms/Scalar/DropUnnecessaryAssumes.cpp
@@ -21,8 +21,8 @@ DropUnnecessaryAssumesPass::run(Function &F, FunctionAnalysisManager &FAM) {
AssumptionCache &AC = FAM.getResult<AssumptionAnalysis>(F);
bool Changed = false;
- for (AssumptionCache::ResultElem &Elem : AC.assumptions()) {
- auto *Assume = cast_or_null<AssumeInst>(Elem.Assume);
+ for (const WeakVH &Elem : AC.assumptions()) {
+ auto *Assume = cast_or_null<AssumeInst>(Elem);
if (!Assume)
continue;
>From d104f7c7c2d5ec576c369458bc3d0e712a2dc1e9 Mon Sep 17 00:00:00 2001
From: David Green <david.green at arm.com>
Date: Wed, 24 Sep 2025 11:40:12 +0100
Subject: [PATCH 27/35] [Debug][AArch64] Do not crash on unknown subreg
register sizes. (#160442)
The AArch64 zsub regs are scalable, so they are defined with a size of -1
(which comes through as 65535). The RegisterSize is only 128, so the code
that tries to find the overlapping regs of z30_z31 in DwarfEmitter can
crash when it accesses out-of-range bits in a BitVector. Hexagon and x86
also contain subregs with unknown sizes.
Ideally most of these would be scalable values, but in the meantime add a
check that the subregister offset and size fit within the current register
size, to prevent the crash.
This fixes the issue reported in #153810.
---
.../CodeGen/AsmPrinter/DwarfExpression.cpp | 2 +-
.../CodeGen/AArch64/debug-info-sve-pair.mir | 344 ++++++++++++++++++
2 files changed, 345 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index 8a30714db2fdf..1703b27d350f3 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -154,7 +154,7 @@ bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
unsigned Size = TRI.getSubRegIdxSize(Idx);
unsigned Offset = TRI.getSubRegIdxOffset(Idx);
Reg = TRI.getDwarfRegNum(SR, false);
- if (Reg < 0)
+ if (Reg < 0 || Offset + Size > RegSize)
continue;
// Used to build the intersection between the bits we already
diff --git a/llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir b/llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir
new file mode 100644
index 0000000000000..113f343bac73e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/debug-info-sve-pair.mir
@@ -0,0 +1,344 @@
+# RUN: llc -start-before=aarch64-asm-printer -o - %s | FileCheck %s
+
+# Check that z30_z31 debug info does not crash.
+
+# CHECK: .Ldebug_loc0:
+# CHECK: .byte 4 // DW_LLE_offset_pair
+# CHECK: .uleb128 .Ltmp2-.Lfunc_begin0 // starting offset
+# CHECK: .uleb128 .Ltmp3-.Lfunc_begin0 // ending offset
+# CHECK: .byte 2 // Loc expr size
+# CHECK: .byte 144 // DW_OP_regx
+# CHECK: .byte 126 // 126
+# CHECK: .byte 4 // DW_LLE_offset_pair
+# CHECK: .uleb128 .Ltmp3-.Lfunc_begin0 // starting offset
+# CHECK: .uleb128 .Lfunc_end0-.Lfunc_begin0 // ending offset
+# CHECK: .byte 6 // Loc expr size
+# CHECK: .byte 144 // sub-register DW_OP_regx
+# CHECK: .byte 94 // 94
+# CHECK: .byte 147 // DW_OP_piece
+# CHECK: .byte 16 // 16
+# CHECK: .byte 147 // DW_OP_piece
+# CHECK: .byte 31 // 31
+# CHECK: .byte 0 // DW_LLE_end_of_list
+
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+ target triple = "aarch64"
+
+ define void @_Z10Sort16RowsILi6EEv12SharedTraitsI10TraitsLaneEP22Trans_NS_hwy_float16_tiS4_(i8 %st.coerce, ptr noundef %keys, i32 noundef %0, ptr noundef %1) #2 !dbg !2 {
+ unreachable
+ }
+
+ attributes #2 = { mustprogress uwtable vscale_range(1,16) "frame-pointer"="non-leaf" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="neoverse-n1" "target-features"="+aes,+crc,+dotprod,+fp-armv8,+fullfp16,+lse,+neon,+perfmon,+ras,+rcpc,+rdm,+sha2,+spe,+ssbs,+sve,+sve-aes,+sve2,+sve2-aes,+v8.1a,+v8.2a,+v8a,-fmv" "tune-cpu"="generic" }
+
+ !llvm.dbg.cu = !{!3}
+ !llvm.module.flags = !{!4, !5, !6, !7, !8, !9}
+ !llvm.ident = !{!10}
+
+ !2 = distinct !DISubprogram(name: "Sort16Rows<6>", linkageName: "_Z10Sort16RowsILi6EEv12SharedTraitsI10TraitsLaneEP22Trans_NS_hwy_float16_tiS4_", scope: !12, file: !12, line: 369, type: !18, scopeLine: 370, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !19, retainedNodes: !20, keyInstructions: true)
+ !3 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !14, producer: "clang version 22.0.0git (https://github.com/llvm/llvm-project.git)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None)
+ !4 = !{i32 7, !"Dwarf Version", i32 5}
+ !5 = !{i32 2, !"Debug Info Version", i32 3}
+ !6 = !{i32 1, !"wchar_size", i32 4}
+ !7 = !{i32 7, !"uwtable", i32 2}
+ !8 = !{i32 7, !"frame-pointer", i32 1}
+ !9 = !{i32 7, !"debug-info-assignment-tracking", i1 true}
+ !10 = !{!"clang version 22.0.0git (https://github.com/llvm/llvm-project.git)"}
+ !12 = !DIFile(filename: "example.cpp", directory: "/app", checksumkind: CSK_MD5, checksum: "5fbaafea0ede06ddd1ffc371aeee276e")
+ !14 = !DIFile(filename: "/app/example.cpp", directory: "/app", checksumkind: CSK_MD5, checksum: "5fbaafea0ede06ddd1ffc371aeee276e")
+ !17 = !DIBasicType(name: "__fp16", size: 16, encoding: DW_ATE_float)
+ !18 = !DISubroutineType(types: !21)
+ !19 = !{!120}
+ !20 = !{!77, !78, !79, !80, !81, !82, !83, !84, !85, !86, !87, !88, !89, !90, !91, !92, !93, !94, !95, !96, !97, !98, !99, !100, !101, !102, !103, !104, !105}
+ !21 = !{null, !22, !23, !24, !23}
+ !22 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "SharedTraits<TraitsLane>", file: !12, line: 272, size: 8, flags: DIFlagTypePassByValue, elements: !25, templateParams: !26, identifier: "_ZTS12SharedTraitsI10TraitsLaneE")
+ !23 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !55, size: 64)
+ !24 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+ !25 = !{!27}
+ !26 = !{!76}
+ !27 = !DIDerivedType(tag: DW_TAG_inheritance, scope: !22, baseType: !28, extraData: i32 0)
+ !28 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "TraitsLane", file: !12, line: 325, size: 8, flags: DIFlagTypePassByValue, elements: !29, identifier: "_ZTS10TraitsLane")
+ !29 = !{!30, !31, !32, !33}
+ !30 = !DIDerivedType(tag: DW_TAG_inheritance, scope: !28, baseType: !34, extraData: i32 0)
+ !31 = !DISubprogram(name: "Sort2", linkageName: "_ZN10TraitsLane5Sort2E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EERu13__SVFloat16_tS4_", scope: !28, file: !12, line: 326, type: !70, scopeLine: 326, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+ !32 = !DISubprogram(name: "SortPairsDistance1", linkageName: "_ZN10TraitsLane18SortPairsDistance1E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 344, type: !74, scopeLine: 344, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+ !33 = !DISubprogram(name: "SortPairsDistance4", linkageName: "_ZN10TraitsLane18SortPairsDistance4E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 352, type: !74, scopeLine: 352, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+ !34 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "KeyLane", file: !12, line: 307, size: 8, flags: DIFlagTypePassByValue, elements: !35, identifier: "_ZTS7KeyLane")
+ !35 = !{!36, !37, !38}
+ !36 = !DISubprogram(name: "SwapAdjacentPairs", linkageName: "_ZN7KeyLane17SwapAdjacentPairsE4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !34, file: !12, line: 309, type: !39, scopeLine: 309, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+ !37 = !DISubprogram(name: "SwapAdjacentPairs", linkageName: "_ZN7KeyLane17SwapAdjacentPairsEu13__SVFloat32_t", scope: !34, file: !12, line: 314, type: !58, scopeLine: 314, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+ !38 = !DISubprogram(name: "OddEvenPairs", linkageName: "_ZN7KeyLane12OddEvenPairsE4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_tS3_", scope: !34, file: !12, line: 318, type: !68, scopeLine: 318, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized)
+ !39 = !DISubroutineType(types: !40)
+ !40 = !{!41, !42, !43, !41}
+ !41 = !DIDerivedType(tag: DW_TAG_typedef, name: "Vec<Simd<Trans_NS_hwy_float16_t, 1, 0> >", file: !12, line: 270, baseType: !44)
+ !42 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !34, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !43 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd<Trans_NS_hwy_float16_t, 1, 0>", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !51, identifier: "_ZTS4SimdI22Trans_NS_hwy_float16_tLi1ELi0EE")
+ !44 = !DIDerivedType(tag: DW_TAG_typedef, name: "VFromD<Simd<Trans_NS_hwy_float16_t, 1, 0> >", file: !12, line: 142, baseType: !45)
+ !45 = !DIDerivedType(tag: DW_TAG_typedef, name: "svfloat16_t", file: !12, line: 26, baseType: !46)
+ !46 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVFloat16_t", file: !12, baseType: !47)
+ !47 = !DICompositeType(tag: DW_TAG_array_type, baseType: !17, flags: DIFlagVector, elements: !48)
+ !48 = !{!49}
+ !49 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 4, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+ !50 = !{}
+ !51 = !{!52, !53, !54}
+ !52 = !DITemplateTypeParameter(name: "Lane", type: !55)
+ !53 = !DITemplateValueParameter(type: !24, value: i32 1)
+ !54 = !DITemplateValueParameter(name: "kPow2", type: !24, value: i32 0)
+ !55 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Trans_NS_hwy_float16_t", file: !12, line: 6, size: 16, flags: DIFlagTypePassByValue, elements: !56, identifier: "_ZTS22Trans_NS_hwy_float16_t")
+ !56 = !{!57}
+ !57 = !DIDerivedType(tag: DW_TAG_member, name: "native", scope: !55, file: !12, line: 7, baseType: !17, size: 16)
+ !58 = !DISubroutineType(types: !59)
+ !59 = !{!60, !42, !60}
+ !60 = !DIDerivedType(tag: DW_TAG_typedef, name: "Vec<Simd<float, 0, 0> >", file: !12, line: 270, baseType: !61)
+ !61 = !DIDerivedType(tag: DW_TAG_typedef, name: "VFromD<Simd<float, 0, 0> >", file: !12, line: 142, baseType: !62)
+ !62 = !DIDerivedType(tag: DW_TAG_typedef, name: "svfloat32_t", file: !12, line: 27, baseType: !63)
+ !63 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVFloat32_t", file: !12, baseType: !64)
+ !64 = !DICompositeType(tag: DW_TAG_array_type, baseType: !65, flags: DIFlagVector, elements: !66)
+ !65 = !DIBasicType(name: "float", size: 32, encoding: DW_ATE_float)
+ !66 = !{!67}
+ !67 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 2, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+ !68 = !DISubroutineType(types: !69)
+ !69 = !{!41, !42, !43, !41, !41}
+ !70 = !DISubroutineType(types: !71)
+ !71 = !{null, !72, !43, !73, !73}
+ !72 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !28, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !73 = !DIDerivedType(tag: DW_TAG_reference_type, baseType: !41, size: 64)
+ !74 = !DISubroutineType(types: !75)
+ !75 = !{!41, !72, !43, !41}
+ !76 = !DITemplateTypeParameter(name: "Base", type: !28)
+ !77 = !DILocalVariable(name: "st", arg: 1, scope: !2, file: !12, line: 369, type: !22)
+ !78 = !DILocalVariable(name: "keys", arg: 2, scope: !2, file: !12, line: 369, type: !23)
+ !79 = !DILocalVariable(arg: 3, scope: !2, file: !12, line: 369, type: !24)
+ !80 = !DILocalVariable(arg: 4, scope: !2, file: !12, line: 370, type: !23)
+ !81 = !DILocalVariable(name: "d", scope: !2, file: !12, line: 371, type: !106)
+ !82 = !DILocalVariable(name: "v8", scope: !2, file: !12, line: 373, type: !112)
+ !83 = !DILocalVariable(name: "v9", scope: !2, file: !12, line: 373, type: !112)
+ !84 = !DILocalVariable(name: "va", scope: !2, file: !12, line: 373, type: !112)
+ !85 = !DILocalVariable(name: "vb", scope: !2, file: !12, line: 373, type: !112)
+ !86 = !DILocalVariable(name: "vc", scope: !2, file: !12, line: 373, type: !112)
+ !87 = !DILocalVariable(name: "vd", scope: !2, file: !12, line: 373, type: !112)
+ !88 = !DILocalVariable(name: "ve", scope: !2, file: !12, line: 373, type: !112)
+ !89 = !DILocalVariable(name: "vf", scope: !2, file: !12, line: 373, type: !112)
+ !90 = !DILocalVariable(name: "v2", scope: !2, file: !12, line: 373, type: !112)
+ !91 = !DILocalVariable(name: "v4", scope: !2, file: !12, line: 373, type: !112)
+ !92 = !DILocalVariable(name: "v7", scope: !2, file: !12, line: 373, type: !112)
+ !93 = !DILocalVariable(name: "v0", scope: !2, file: !12, line: 374, type: !112)
+ !94 = !DILocalVariable(name: "v3", scope: !2, file: !12, line: 375, type: !112)
+ !95 = !DILocalVariable(name: "v5", scope: !2, file: !12, line: 376, type: !112)
+ !96 = !DILocalVariable(name: "v6", scope: !2, file: !12, line: 377, type: !112)
+ !97 = !DILocalVariable(name: "kIota", scope: !2, file: !12, line: 378, type: !112)
+ !98 = !DILocalVariable(name: "m8", scope: !2, file: !12, line: 379, type: !113)
+ !99 = !DILocalVariable(name: "m9", scope: !2, file: !12, line: 380, type: !113)
+ !100 = !DILocalVariable(name: "ma", scope: !2, file: !12, line: 381, type: !113)
+ !101 = !DILocalVariable(name: "mb", scope: !2, file: !12, line: 382, type: !113)
+ !102 = !DILocalVariable(name: "mc", scope: !2, file: !12, line: 383, type: !113)
+ !103 = !DILocalVariable(name: "md", scope: !2, file: !12, line: 384, type: !113)
+ !104 = !DILocalVariable(name: "me", scope: !2, file: !12, line: 385, type: !113)
+ !105 = !DILocalVariable(name: "mf", scope: !2, file: !12, line: 386, type: !113)
+ !106 = !DIDerivedType(tag: DW_TAG_typedef, name: "CappedTag<Trans_NS_hwy_float16_t, 6>", file: !12, line: 97, baseType: !107)
+ !107 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !108, file: !12, line: 89, baseType: !43)
+ !108 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "ClampNAndPow2<Trans_NS_hwy_float16_t, 1>", file: !12, line: 88, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !109, identifier: "_ZTS13ClampNAndPow2I22Trans_NS_hwy_float16_tLi1EE")
+ !109 = !{!110, !111}
+ !110 = !DITemplateTypeParameter(name: "T", type: !55)
+ !111 = !DITemplateValueParameter(name: "N", type: !24, value: i32 1)
+ !112 = !DIDerivedType(tag: DW_TAG_typedef, name: "V", scope: !2, file: !12, line: 372, baseType: !41)
+ !113 = !DIDerivedType(tag: DW_TAG_typedef, name: "Mask<Simd<Trans_NS_hwy_float16_t, 1, 0> >", file: !12, line: 271, baseType: !114)
+ !114 = !DIDerivedType(tag: DW_TAG_typedef, name: "svbool_t", file: !12, line: 28, baseType: !115)
+ !115 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVBool_t", file: !12, baseType: !116)
+ !116 = !DICompositeType(tag: DW_TAG_array_type, baseType: !117, flags: DIFlagVector, elements: !118)
+ !117 = !DIBasicType(name: "unsigned char", size: 8, encoding: DW_ATE_unsigned_char)
+ !118 = !{!119}
+ !119 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 1, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+ !120 = !DITemplateValueParameter(name: "kKeysPerRow", type: !24, value: i32 6)
+ !121 = !DILocalVariable(name: "this", arg: 1, scope: !122, type: !123, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !122 = distinct !DISubprogram(name: "Sort2", linkageName: "_ZN10TraitsLane5Sort2E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EERu13__SVFloat16_tS4_", scope: !28, file: !12, line: 326, type: !70, scopeLine: 328, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !31, retainedNodes: !124, keyInstructions: true)
+ !123 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !28, size: 64)
+ !124 = !{!121, !125, !126, !127, !128, !129, !130, !131, !132}
+ !125 = !DILocalVariable(name: "d", arg: 2, scope: !122, file: !12, line: 326, type: !43)
+ !126 = !DILocalVariable(name: "a", arg: 3, scope: !122, file: !12, line: 327, type: !73)
+ !127 = !DILocalVariable(name: "b", arg: 4, scope: !122, file: !12, line: 328, type: !73)
+ !128 = !DILocalVariable(name: "__trans_tmp_52", scope: !122, file: !12, line: 329, type: !41)
+ !129 = !DILocalVariable(name: "a_copy", scope: !122, file: !12, line: 329, type: !41)
+ !130 = !DILocalVariable(name: "__trans_tmp_45", scope: !122, file: !12, line: 330, type: !41)
+ !131 = !DILocalVariable(name: "__trans_tmp_53", scope: !133, file: !12, line: 334, type: !41)
+ !132 = !DILocalVariable(name: "__trans_tmp_29", scope: !134, file: !12, line: 336, type: !45)
+ !133 = distinct !DILexicalBlock(scope: !122, file: !12, line: 333, column: 5)
+ !134 = distinct !DILexicalBlock(scope: !133, file: !12, line: 335, column: 7)
+ !137 = distinct !DISubprogram(name: "SortPairsDistance1", linkageName: "_ZN10TraitsLane18SortPairsDistance1E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 344, type: !74, scopeLine: 345, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !32, retainedNodes: !139, keyInstructions: true)
+ !139 = !{!140, !141, !142, !143}
+ !140 = !DILocalVariable(name: "this", arg: 1, scope: !137, type: !123, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !141 = !DILocalVariable(name: "d", arg: 2, scope: !137, file: !12, line: 344, type: !43)
+ !142 = !DILocalVariable(name: "v", arg: 3, scope: !137, file: !12, line: 345, type: !41)
+ !143 = !DILocalVariable(name: "__trans_tmp_48", scope: !137, file: !12, line: 346, type: !41)
+ !144 = distinct !DISubprogram(name: "Merge16x16<6, SharedTraits<TraitsLane>, __SVFloat16_t>", linkageName: "_Z10Merge16x16ILi6E12SharedTraitsI10TraitsLaneEu13__SVFloat16_tEvT0_RT1_S6_S6_S6_S6_S6_S6_S6_S6_S6_S6_S6_", scope: !12, file: !12, line: 286, type: !146, scopeLine: 288, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !147, retainedNodes: !148, keyInstructions: true)
+ !145 = distinct !DILocation(line: 388, column: 3, scope: !2)
+ !146 = !DISubroutineType(types: !149)
+ !147 = !{!164, !165, !166}
+ !148 = !{!151, !152, !153, !154, !155, !156, !157, !158, !159, !160, !161, !162, !163}
+ !149 = !{null, !22, !150, !150, !150, !150, !150, !150, !150, !150, !150, !150, !150, !150}
+ !150 = !DIDerivedType(tag: DW_TAG_reference_type, baseType: !47, size: 64)
+ !151 = !DILocalVariable(name: "st", arg: 1, scope: !144, file: !12, line: 286, type: !22)
+ !152 = !DILocalVariable(name: "v0", arg: 2, scope: !144, file: !12, line: 286, type: !150)
+ !153 = !DILocalVariable(name: "v2", arg: 3, scope: !144, file: !12, line: 286, type: !150)
+ !154 = !DILocalVariable(name: "v5", arg: 4, scope: !144, file: !12, line: 286, type: !150)
+ !155 = !DILocalVariable(name: "v6", arg: 5, scope: !144, file: !12, line: 287, type: !150)
+ !156 = !DILocalVariable(name: "v7", arg: 6, scope: !144, file: !12, line: 287, type: !150)
+ !157 = !DILocalVariable(name: "v9", arg: 7, scope: !144, file: !12, line: 287, type: !150)
+ !158 = !DILocalVariable(name: "va", arg: 8, scope: !144, file: !12, line: 287, type: !150)
+ !159 = !DILocalVariable(name: "vb", arg: 9, scope: !144, file: !12, line: 287, type: !150)
+ !160 = !DILocalVariable(name: "vc", arg: 10, scope: !144, file: !12, line: 288, type: !150)
+ !161 = !DILocalVariable(name: "vd", arg: 11, scope: !144, file: !12, line: 288, type: !150)
+ !162 = !DILocalVariable(name: "ve", arg: 12, scope: !144, file: !12, line: 288, type: !150)
+ !163 = !DILocalVariable(name: "vf", arg: 13, scope: !144, file: !12, line: 288, type: !150)
+ !164 = !DITemplateValueParameter(type: !24, value: i32 6)
+ !165 = !DITemplateTypeParameter(name: "Traits", type: !22)
+ !166 = !DITemplateTypeParameter(name: "V", type: !47)
+ !184 = !DILocalVariable(name: "this", arg: 1, scope: !185, type: !186, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !185 = distinct !DISubprogram(name: "SortPairsDistance2<Simd<Trans_NS_hwy_float16_t, 1, 0> >", linkageName: "_ZN12SharedTraitsI10TraitsLaneE18SortPairsDistance2I4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEEEDTcl4ZerocvT__EEES6_S7_", scope: !22, file: !12, line: 273, type: !187, scopeLine: 273, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !188, declaration: !189, retainedNodes: !190, keyInstructions: true)
+ !186 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !22, size: 64)
+ !187 = !DISubroutineType(types: !191)
+ !188 = !{!193}
+ !189 = !DISubprogram(name: "SortPairsDistance2<Simd<Trans_NS_hwy_float16_t, 1, 0> >", linkageName: "_ZN12SharedTraitsI10TraitsLaneE18SortPairsDistance2I4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEEEDTcl4ZerocvT__EEES6_S7_", scope: !22, file: !12, line: 273, type: !187, scopeLine: 273, flags: DIFlagPrototyped, spFlags: DISPFlagOptimized, templateParams: !188)
+ !190 = !{!184, !194, !195, !196, !197}
+ !191 = !{!41, !192, !43, !41}
+ !192 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !22, size: 64, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !193 = !DITemplateTypeParameter(name: "D", type: !43)
+ !194 = !DILocalVariable(name: "d", arg: 2, scope: !185, file: !12, line: 273, type: !43)
+ !195 = !DILocalVariable(name: "v", arg: 3, scope: !185, file: !12, line: 273, type: !41)
+ !196 = !DILocalVariable(name: "base", scope: !185, file: !12, line: 274, type: !28)
+ !197 = !DILocalVariable(name: "swapped", scope: !185, file: !12, line: 275, type: !41)
+ !200 = !DILocation(line: 0, scope: !122, inlinedAt: !201)
+ !201 = distinct !DILocation(line: 358, column: 5, scope: !202, inlinedAt: !203)
+ !202 = distinct !DISubprogram(name: "SortPairsDistance4", linkageName: "_ZN10TraitsLane18SortPairsDistance4E4SimdI22Trans_NS_hwy_float16_tLi1ELi0EEu13__SVFloat16_t", scope: !28, file: !12, line: 352, type: !74, scopeLine: 353, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !33, retainedNodes: !204, keyInstructions: true)
+ !203 = distinct !DILocation(line: 298, column: 11, scope: !144, inlinedAt: !145)
+ !204 = !{!205, !206, !207, !208, !209, !210, !211}
+ !205 = !DILocalVariable(name: "this", arg: 1, scope: !202, type: !123, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !206 = !DILocalVariable(name: "d", arg: 2, scope: !202, file: !12, line: 352, type: !43)
+ !207 = !DILocalVariable(name: "v", arg: 3, scope: !202, file: !12, line: 353, type: !41)
+ !208 = !DILocalVariable(name: "__trans_tmp_42", scope: !202, file: !12, line: 354, type: !41)
+ !209 = !DILocalVariable(name: "__trans_tmp_39", scope: !202, file: !12, line: 354, type: !41)
+ !210 = !DILocalVariable(name: "dw", scope: !202, file: !12, line: 355, type: !212)
+ !211 = !DILocalVariable(name: "__trans_tmp_51", scope: !219, file: !12, line: 360, type: !44)
+ !212 = !DIDerivedType(tag: DW_TAG_typedef, name: "RepartitionToWide<Simd<Trans_NS_hwy_float16_t, 1, 0> >", file: !12, line: 103, baseType: !213)
+ !213 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition<float, Simd<Trans_NS_hwy_float16_t, 1, 0> >", file: !12, line: 101, baseType: !214)
+ !214 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition<float>", scope: !43, file: !12, line: 86, baseType: !215)
+ !215 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd<float, 0, 0>", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !216, identifier: "_ZTS4SimdIfLi0ELi0EE")
+ !216 = !{!217, !218, !54}
+ !217 = !DITemplateTypeParameter(name: "Lane", type: !65)
+ !218 = !DITemplateValueParameter(type: !24, value: i32 0)
+ !219 = distinct !DILexicalBlock(scope: !202, file: !12, line: 359, column: 5)
+ !220 = !DILocalVariable(name: "this", arg: 1, scope: !221, type: !222, flags: DIFlagArtificial | DIFlagObjectPointer)
+ !221 = distinct !DISubprogram(name: "SwapAdjacentPairs", linkageName: "_ZN7KeyLane17SwapAdjacentPairsEu13__SVFloat32_t", scope: !34, file: !12, line: 314, type: !58, scopeLine: 314, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, declaration: !37, retainedNodes: !223, keyInstructions: true)
+ !222 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !34, size: 64)
+ !223 = !{!220, !224}
+ !224 = !DILocalVariable(name: "v", arg: 2, scope: !221, file: !12, line: 314, type: !60)
+ !225 = distinct !DILocation(line: 357, column: 38, scope: !202, inlinedAt: !203)
+ !226 = !DILocalVariable(name: "v", arg: 1, scope: !227, file: !12, line: 264, type: !64)
+ !227 = distinct !DISubprogram(name: "Shuffle1032<__SVFloat32_t>", linkageName: "_Z11Shuffle1032Iu13__SVFloat32_tET_S1_", scope: !12, file: !12, line: 264, type: !228, scopeLine: 264, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !229, retainedNodes: !230, keyInstructions: true)
+ !228 = !DISubroutineType(types: !231)
+ !229 = !{!262}
+ !230 = !{!226, !232, !233, !234}
+ !231 = !{!64, !64}
+ !232 = !DILocalVariable(name: "d", scope: !227, file: !12, line: 265, type: !235)
+ !233 = !DILocalVariable(name: "d8", scope: !227, file: !12, line: 266, type: !252)
+ !234 = !DILocalVariable(name: "v8", scope: !227, file: !12, line: 267, type: !257)
+ !235 = !DIDerivedType(tag: DW_TAG_typedef, name: "DFromV<__SVFloat32_t>", file: !12, line: 108, baseType: !236)
+ !236 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !237, file: !12, line: 116, baseType: !238)
+ !237 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "DFromV_t<__SVFloat32_t>", file: !12, line: 115, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !239, identifier: "_ZTS8DFromV_tIu13__SVFloat32_tE")
+ !238 = !DIDerivedType(tag: DW_TAG_typedef, name: "ScalableTag<float>", file: !12, line: 95, baseType: !241)
+ !239 = !{!240}
+ !240 = !DITemplateTypeParameter(type: !64)
+ !241 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !242, file: !12, line: 92, baseType: !243)
+ !242 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "ScalableTagChecker<float>", file: !12, line: 91, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !244, identifier: "_ZTS18ScalableTagCheckerIfE")
+ !243 = !DIDerivedType(tag: DW_TAG_typedef, name: "type", scope: !246, file: !12, line: 89, baseType: !247)
+ !244 = !{!245}
+ !245 = !DITemplateTypeParameter(name: "T", type: !65)
+ !246 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "ClampNAndPow2<float, 64>", file: !12, line: 88, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !248, identifier: "_ZTS13ClampNAndPow2IfLi64EE")
+ !247 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd<float, 64, 0>", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !250, identifier: "_ZTS4SimdIfLi64ELi0EE")
+ !248 = !{!245, !249}
+ !249 = !DITemplateValueParameter(name: "N", type: !24, value: i32 64)
+ !250 = !{!217, !251, !54}
+ !251 = !DITemplateValueParameter(type: !24, value: i32 64)
+ !252 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition<unsigned char, Simd<float, 64, 0> >", file: !12, line: 101, baseType: !253)
+ !253 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition<unsigned char>", scope: !247, file: !12, line: 86, baseType: !254)
+ !254 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd<unsigned char, 0, 0>", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !255, identifier: "_ZTS4SimdIhLi0ELi0EE")
+ !255 = !{!256, !218, !54}
+ !256 = !DITemplateTypeParameter(name: "Lane", type: !117)
+ !257 = !DIDerivedType(tag: DW_TAG_typedef, name: "svuint8_t", file: !12, line: 22, baseType: !258)
+ !258 = !DIDerivedType(tag: DW_TAG_typedef, name: "__SVUint8_t", file: !12, baseType: !259)
+ !259 = !DICompositeType(tag: DW_TAG_array_type, baseType: !117, flags: DIFlagVector, elements: !260)
+ !260 = !{!261}
+ !261 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_constu, 8, DW_OP_bregx, 46, 0, DW_OP_mul, DW_OP_constu, 1, DW_OP_minus))
+ !262 = !DITemplateTypeParameter(name: "V", type: !64)
+ !263 = !DILocalVariable(name: "hi", arg: 1, scope: !264, file: !12, line: 248, type: !259)
+ !264 = distinct !DISubprogram(name: "CombineShiftRightBytes<8, __SVUint8_t>", linkageName: "_Z22CombineShiftRightBytesILi8Eu11__SVUint8_tET0_S1_S1_", scope: !12, file: !12, line: 248, type: !265, scopeLine: 248, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !266, retainedNodes: !267, keyInstructions: true)
+ !265 = !DISubroutineType(types: !268)
+ !266 = !{!283, !284}
+ !267 = !{!263, !269, !270, !271, !272, !273, !274, !275, !276}
+ !268 = !{!259, !259, !259}
+ !269 = !DILocalVariable(name: "lo", arg: 2, scope: !264, file: !12, line: 248, type: !259)
+ !270 = !DILocalVariable(name: "__trans_tmp_33", scope: !264, file: !12, line: 249, type: !257)
+ !271 = !DILocalVariable(name: "__trans_tmp_15", scope: !264, file: !12, line: 249, type: !257)
+ !272 = !DILocalVariable(name: "__trans_tmp_32", scope: !264, file: !12, line: 250, type: !257)
+ !273 = !DILocalVariable(name: "d8", scope: !264, file: !12, line: 251, type: !277)
+ !274 = !DILocalVariable(name: "__trans_tmp_16", scope: !264, file: !12, line: 252, type: !114)
+ !275 = !DILocalVariable(name: "lo_down", scope: !264, file: !12, line: 254, type: !257)
+ !276 = !DILocalVariable(name: "__trans_tmp_34", scope: !264, file: !12, line: 255, type: !114)
+ !277 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition<unsigned char, Simd<char, 0, 0> >", file: !12, line: 101, baseType: !278)
+ !278 = !DIDerivedType(tag: DW_TAG_typedef, name: "Repartition<unsigned char>", scope: !279, file: !12, line: 86, baseType: !254)
+ !279 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "Simd<char, 0, 0>", file: !12, line: 83, size: 8, flags: DIFlagTypePassByValue, elements: !50, templateParams: !280, identifier: "_ZTS4SimdIcLi0ELi0EE")
+ !280 = !{!281, !218, !54}
+ !281 = !DITemplateTypeParameter(name: "Lane", type: !282)
+ !282 = !DIBasicType(name: "char", size: 8, encoding: DW_ATE_unsigned_char)
+ !283 = !DITemplateValueParameter(name: "kBytes", type: !24, value: i32 8)
+ !284 = !DITemplateTypeParameter(name: "V", type: !259)
+ !285 = !DILocalVariable(name: "hi", arg: 1, scope: !286, file: !12, line: 216, type: !257)
+ !286 = distinct !DISubprogram(name: "Ext<8>", linkageName: "_Z3ExtILi8EEu11__SVUint8_tS0_S0_", scope: !12, file: !12, line: 216, type: !287, scopeLine: 216, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !288, retainedNodes: !289, keyInstructions: true)
+ !287 = !DISubroutineType(types: !290)
+ !288 = !{!292}
+ !289 = !{!285, !291}
+ !290 = !{!257, !257, !257}
+ !291 = !DILocalVariable(name: "lo", arg: 2, scope: !286, file: !12, line: 216, type: !257)
+ !292 = !DITemplateValueParameter(name: "kIndex", type: !24, value: i32 8)
+ !293 = !DILocalVariable(name: "a", arg: 1, scope: !294, file: !12, line: 180, type: !47)
+ !294 = distinct !DISubprogram(name: "Min<__SVFloat16_t>", linkageName: "_Z3MinIu13__SVFloat16_tET_S1_S1_", scope: !12, file: !12, line: 180, type: !295, scopeLine: 180, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !3, templateParams: !296, retainedNodes: !297, keyInstructions: true)
+ !295 = !DISubroutineType(types: !298)
+ !296 = !{!166}
+ !297 = !{!293, !299, !300, !301, !302, !303, !304}
+ !298 = !{!47, !47, !47}
+ !299 = !DILocalVariable(name: "b", arg: 2, scope: !294, file: !12, line: 180, type: !47)
+ !300 = !DILocalVariable(name: "__trans_tmp_36", scope: !294, file: !12, line: 181, type: !45)
+ !301 = !DILocalVariable(name: "__trans_tmp_25", scope: !294, file: !12, line: 181, type: !45)
+ !302 = !DILocalVariable(name: "__trans_tmp_27", scope: !294, file: !12, line: 182, type: !114)
+ !303 = !DILocalVariable(name: "__trans_tmp_24", scope: !294, file: !12, line: 183, type: !114)
+ !304 = !DILocalVariable(name: "__trans_tmp_19", scope: !294, file: !12, line: 184, type: !114)
+ !308 = distinct !DILocation(line: 315, column: 12, scope: !221, inlinedAt: !225)
+ !309 = distinct !DILocation(line: 268, column: 21, scope: !227, inlinedAt: !308)
+ !311 = distinct !DILocation(line: 254, column: 18, scope: !264, inlinedAt: !309)
+ !312 = !DILocation(line: 217, column: 10, scope: !286, inlinedAt: !311, atomGroup: 1, atomRank: 2)
+ !313 = !DILocation(line: 257, column: 20, scope: !264, inlinedAt: !309, atomGroup: 5, atomRank: 2)
+ !314 = !DILocation(line: 0, scope: !294, inlinedAt: !315)
+ !315 = distinct !DILocation(line: 331, column: 22, scope: !122, inlinedAt: !201)
+ !316 = !DILocation(line: 185, column: 20, scope: !294, inlinedAt: !315)
+ !317 = !DILocation(line: 403, column: 1, scope: !2, atomGroup: 19449, atomRank: 1)
+
+...
+---
+name: _Z10Sort16RowsILi6EEv12SharedTraitsI10TraitsLaneEP22Trans_NS_hwy_float16_tiS4_
+body: |
+ bb.0:
+ liveins: $x1, $z0, $z1, $p0
+
+ $z30 = LDR_ZXI $x1, -14
+ $z31 = LDR_ZXI $x1, -13
+ $z23 = ORR_ZZZ $z30, $z30
+ renamable $z2 = EXT_ZZI_B renamable $z30_z31, 8, debug-location !312
+ renamable $z7 = SEL_ZPZZ_B renamable $p0, renamable $z0, killed renamable $z1, debug-location !313
+ DBG_VALUE $z30, $noreg, !129, !DIExpression(), debug-location !200
+ renamable $p3 = nofpexcept FCMGT_PPzZZ_H renamable $p0, renamable $z0, undef renamable $z1, debug-location !316
+ DBG_VALUE $z30_z31, $noreg, !129, !DIExpression(), debug-location !200
+ DBG_VALUE $z30_z31, $noreg, !293, !DIExpression(), debug-location !314
+ RET undef $lr, debug-location !317
+...
+
>From 0f411fdae132c3e944fa198072f8d405e464672b Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Wed, 24 Sep 2025 12:42:50 +0200
Subject: [PATCH 28/35] [IR] Forbid mixing condition and operand bundle assumes
(#160460)
Assumes either have a boolean condition or a number of attribute-based
operand bundles. Currently, we also allow mixing both forms, though we
don't make use of this in practice. This adds additional complexity for
code dealing with assumes.
Forbid mixing both forms, by requiring that assumes with operand bundles
have an i1 true condition.
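As a small illustrative sketch (example IR, not taken from the patch), the
accepted and rejected forms after this change look like:

  declare void @llvm.assume(i1)

  define void @example(ptr %p, i1 %cond) {
    ; OK: operand bundles with a literal 'i1 true' condition.
    call void @llvm.assume(i1 true) [ "nonnull"(ptr %p), "align"(ptr %p, i64 8) ]
    ; OK: a plain boolean condition with no bundles.
    call void @llvm.assume(i1 %cond)
    ; Now rejected by the verifier: a non-constant condition combined
    ; with operand bundles, e.g.
    ;   call void @llvm.assume(i1 %cond) [ "nonnull"(ptr %p) ]
    ret void
  }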
---
llvm/docs/LangRef.rst | 4 +++-
llvm/lib/IR/Verifier.cpp | 5 +++++
.../AlignmentFromAssumptions/domtree-crash.ll | 4 ++--
llvm/test/Transforms/InstCombine/assume.ll | 16 ++++++++--------
llvm/test/Verifier/assume-bundles.ll | 4 +++-
mlir/test/Target/LLVMIR/Import/intrinsic.ll | 6 +++---
mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir | 7 ++++---
7 files changed, 28 insertions(+), 18 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 2fe3611be80b0..2934180c5ef79 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3013,6 +3013,8 @@ assumptions, such as that a :ref:`parameter attribute <paramattrs>` or a
location. Operand bundles enable assumptions that are either hard or impossible
to represent as a boolean argument of an :ref:`llvm.assume <int_assume>`.
+Assumes with operand bundles must have ``i1 true`` as the condition operand.
+
An assume operand bundle has the form:
::
@@ -3045,7 +3047,7 @@ allows the optimizer to assume that at location of call to
.. code-block:: llvm
- call void @llvm.assume(i1 %cond) ["cold"(), "nonnull"(ptr %val)]
+ call void @llvm.assume(i1 true) ["cold"(), "nonnull"(ptr %val)]
allows the optimizer to assume that the :ref:`llvm.assume <int_assume>`
call location is cold and that ``%val`` may not be null.
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 9bde965d660a4..0c6175b1945cc 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -5675,6 +5675,11 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
default:
break;
case Intrinsic::assume: {
+ if (Call.hasOperandBundles()) {
+ auto *Cond = dyn_cast<ConstantInt>(Call.getArgOperand(0));
+ Check(Cond && Cond->isOne(),
+ "assume with operand bundles must have i1 true condition", Call);
+ }
for (auto &Elem : Call.bundle_op_infos()) {
unsigned ArgCount = Elem.End - Elem.Begin;
// Separate storage assumptions are special insofar as they're the only
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll b/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll
index c7fc1dc699671..f9b9dd13b0d0c 100644
--- a/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/domtree-crash.ll
@@ -9,10 +9,10 @@
define void @fn1() {
; CHECK-LABEL: define void @fn1() {
-; CHECK-NEXT: call void @llvm.assume(i1 false) [ "align"(ptr @global, i64 1) ]
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr @global, i64 1) ]
; CHECK-NEXT: ret void
;
- call void @llvm.assume(i1 false) [ "align"(ptr @global, i64 1) ]
+ call void @llvm.assume(i1 true) [ "align"(ptr @global, i64 1) ]
ret void
}
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index e87a61a57ea47..7b0b871513513 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -498,13 +498,13 @@ not_taken:
define i1 @nonnull3B(ptr %a, i1 %control) {
; CHECK-LABEL: @nonnull3B(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
; CHECK-NEXT: br i1 [[CONTROL:%.*]], label [[TAKEN:%.*]], label [[NOT_TAKEN:%.*]]
; CHECK: taken:
-; CHECK-NEXT: [[LOAD:%.*]] = load ptr, ptr [[A:%.*]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne ptr [[LOAD]], null
-; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) [ "nonnull"(ptr [[LOAD]]) ]
-; CHECK-NEXT: ret i1 [[CMP]]
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ]
+; CHECK-NEXT: ret i1 true
; CHECK: not_taken:
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "nonnull"(ptr [[LOAD]]) ]
; CHECK-NEXT: ret i1 false
;
entry:
@@ -512,10 +512,10 @@ entry:
%cmp = icmp ne ptr %load, null
br i1 %control, label %taken, label %not_taken
taken:
- call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
+ call void @llvm.assume(i1 true) ["nonnull"(ptr %load)]
ret i1 %cmp
not_taken:
- call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
+ call void @llvm.assume(i1 true) ["nonnull"(ptr %load)]
ret i1 %control
}
@@ -544,7 +544,7 @@ taken:
br label %exit
exit:
; FIXME: this shouldn't be dropped because it is still dominated by the new position of %load
- call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
+ call void @llvm.assume(i1 %cmp)
ret i1 %cmp2
not_taken:
call void @llvm.assume(i1 %cmp)
@@ -575,7 +575,7 @@ taken:
exit:
ret i1 %cmp2
not_taken:
- call void @llvm.assume(i1 %cmp) ["nonnull"(ptr %load)]
+ call void @llvm.assume(i1 %cmp)
ret i1 %control
}
diff --git a/llvm/test/Verifier/assume-bundles.ll b/llvm/test/Verifier/assume-bundles.ll
index d8037b965edb5..728b118c99fb6 100644
--- a/llvm/test/Verifier/assume-bundles.ll
+++ b/llvm/test/Verifier/assume-bundles.ll
@@ -3,7 +3,7 @@
declare void @llvm.assume(i1)
-define void @func(ptr %P, i32 %P1, ptr %P2, ptr %P3) {
+define void @func(ptr %P, i32 %P1, ptr %P2, ptr %P3, i1 %cond) {
; CHECK: tags must be valid attribute names
; CHECK: "adazdazd"
call void @llvm.assume(i1 true) ["adazdazd"()]
@@ -32,5 +32,7 @@ define void @func(ptr %P, i32 %P1, ptr %P2, ptr %P3) {
call void @llvm.assume(i1 true) ["separate_storage"(ptr %P, i32 123)]
; CHECK: dereferenceable assumptions should have 2 arguments
call void @llvm.assume(i1 true) ["align"(ptr %P, i32 4), "dereferenceable"(ptr %P)]
+; CHECK: assume with operand bundles must have i1 true condition
+ call void @llvm.assume(i1 %cond) ["nonnull"(ptr %P)]
ret void
}
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index e5f92e4337154..d2bb80982bb3d 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -733,12 +733,12 @@ define void @assume(i1 %true) {
}
; CHECK-LABEL: @assume_with_opbundles
-; CHECK-SAME: %[[TRUE:[a-zA-Z0-9]+]]
; CHECK-SAME: %[[PTR:[a-zA-Z0-9]+]]
-define void @assume_with_opbundles(i1 %true, ptr %p) {
+define void @assume_with_opbundles(ptr %p) {
+ ; CHECK: %[[TRUE:.+]] = llvm.mlir.constant(true) : i1
; CHECK: %[[ALIGN:.+]] = llvm.mlir.constant(8 : i32) : i32
; CHECK: llvm.intr.assume %[[TRUE]] ["align"(%[[PTR]], %[[ALIGN]] : !llvm.ptr, i32)] : i1
- call void @llvm.assume(i1 %true) ["align"(ptr %p, i32 8)]
+ call void @llvm.assume(i1 true) ["align"(ptr %p, i32 8)]
ret void
}
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 01aa740452b62..cf3e129879d09 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -460,10 +460,11 @@ llvm.func @assume_without_opbundles(%cond: i1) {
}
// CHECK-LABEL: @assume_with_opbundles
-llvm.func @assume_with_opbundles(%cond: i1, %p: !llvm.ptr) {
+llvm.func @assume_with_opbundles(%p: !llvm.ptr) {
+ %true = llvm.mlir.constant(true) : i1
%0 = llvm.mlir.constant(8 : i32) : i32
- // CHECK: call void @llvm.assume(i1 %{{.+}}) [ "align"(ptr %{{.+}}, i32 8) ]
- llvm.intr.assume %cond ["align"(%p, %0 : !llvm.ptr, i32)] : i1
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(ptr %{{.+}}, i32 8) ]
+ llvm.intr.assume %true ["align"(%p, %0 : !llvm.ptr, i32)] : i1
llvm.return
}
>From 94c58b2a9d48a0356833d125ad80edd3fa2b1548 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 24 Sep 2025 11:43:36 +0100
Subject: [PATCH 29/35] [Headers][X86] Allow _mm_max_pu8 / _mm_min_pu8 to be
used in constexpr (#160489)
These were missed in an earlier patch.
---
clang/lib/Headers/xmmintrin.h | 10 ++++------
clang/test/CodeGen/X86/mmx-builtins.c | 4 ++--
2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 4891e3ce077b5..d876b4735a7d2 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -2363,9 +2363,8 @@ _mm_max_pi16(__m64 __a, __m64 __b) {
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_max_pu8(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_max_pu8(__m64 __a, __m64 __b) {
return (__m64)__builtin_elementwise_max((__v8qu)__a, (__v8qu)__b);
}
@@ -2400,9 +2399,8 @@ _mm_min_pi16(__m64 __a, __m64 __b) {
/// \param __b
/// A 64-bit integer vector containing one of the source operands.
/// \returns A 64-bit integer vector containing the comparison results.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2
-_mm_min_pu8(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR
+_mm_min_pu8(__m64 __a, __m64 __b) {
return (__m64)__builtin_elementwise_min((__v8qu)__a, (__v8qu)__b);
}
diff --git a/clang/test/CodeGen/X86/mmx-builtins.c b/clang/test/CodeGen/X86/mmx-builtins.c
index 43d9ec5e6cc8b..266c78ebe7a3f 100644
--- a/clang/test/CodeGen/X86/mmx-builtins.c
+++ b/clang/test/CodeGen/X86/mmx-builtins.c
@@ -371,7 +371,6 @@ __m64 test_mm_max_pi16(__m64 a, __m64 b) {
// CHECK: call <4 x i16> @llvm.smax.v4i16(
return _mm_max_pi16(a, b);
}
-
TEST_CONSTEXPR(match_v4hi(_mm_max_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, 2, -3, 4}), 1, 2, 3, 4));
__m64 test_mm_max_pu8(__m64 a, __m64 b) {
@@ -379,13 +378,13 @@ __m64 test_mm_max_pu8(__m64 a, __m64 b) {
// CHECK: call <8 x i8> @llvm.umax.v8i8(
return _mm_max_pu8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_max_pu8((__m64)(__v8qs){ 16, 17, 18, -19, -20, 21, -22, -23}, (__m64)(__v8qs){ 1, -2, -3, 4, 5, 0, 7, -8}), 16, -2, -3, -19, -20, 21, -22, -8));
__m64 test_mm_min_pi16(__m64 a, __m64 b) {
// CHECK-LABEL: test_mm_min_pi16
// CHECK: call <4 x i16> @llvm.smin.v4i16(
return _mm_min_pi16(a, b);
}
-
TEST_CONSTEXPR(match_v4hi(_mm_min_pi16((__m64)(__v4hi){+1, -2, +3, -4}, (__m64)(__v4hi){-1, 2, -3, 4}), -1, -2, -3, -4));
__m64 test_mm_min_pu8(__m64 a, __m64 b) {
@@ -393,6 +392,7 @@ __m64 test_mm_min_pu8(__m64 a, __m64 b) {
// CHECK: call <8 x i8> @llvm.umin.v8i8(
return _mm_min_pu8(a, b);
}
+TEST_CONSTEXPR(match_v8qi(_mm_min_pu8((__m64)(__v8qs){ 16, 17, 18, -19, -20, 21, -22, -23}, (__m64)(__v8qs){ 1, -2, -3, 4, 5, 0, 7, -8}), 1, 17, 18, 4, 5, 0, 7, -23));
int test_mm_movemask_pi8(__m64 a) {
// CHECK-LABEL: test_mm_movemask_pi8
>From 2d3cc10ffcd1658d2ae7944d79092430119e3417 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Wed, 24 Sep 2025 11:45:53 +0100
Subject: [PATCH 30/35] [Headers][X86] _mm_cmpgt_epi64 is only available on
SSE42 targets (#160491)
smmintrin.h redefines __DEFAULT_FN_ATTRS halfway through the file to
handle SSE42-only instructions; when we made _mm_cmpgt_epi64 constexpr,
we failed to redefine __DEFAULT_FN_ATTRS_CONSTEXPR as well to match.
---
clang/lib/Headers/smmintrin.h | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 6319fdbbeb8f0..3aff679b608ba 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -1534,9 +1534,16 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
so we'll do the same. */
#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS 0x00
#define _SIDD_UWORD_OPS 0x01
>From 07278838b6bb3f5ddd5cc8551fc0b2aacefbe15b Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Wed, 24 Sep 2025 11:51:55 +0100
Subject: [PATCH 31/35] [LoopPeel] Add test with branch that can be simplified
with guards.
Add a test where a branch can be removed after peeling by applying
information from loop guards. It unfortunately requires running IndVars
first to strengthen the flags of the induction variable.
---
llvm/test/Transforms/LoopUnroll/scevunroll.ll | 78 +++++++++++++++++--
1 file changed, 71 insertions(+), 7 deletions(-)
diff --git a/llvm/test/Transforms/LoopUnroll/scevunroll.ll b/llvm/test/Transforms/LoopUnroll/scevunroll.ll
index b6b14e365cc1d..fa55eab062198 100644
--- a/llvm/test/Transforms/LoopUnroll/scevunroll.ll
+++ b/llvm/test/Transforms/LoopUnroll/scevunroll.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -passes='loop(indvars),loop-unroll' -verify-loop-info | FileCheck %s
;
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-n32:64-S128-Fn32"
+
; Unit tests for loop unrolling using ScalarEvolution to compute trip counts.
;
; Indvars is run first to generate an "old" SCEV result. Some unit
@@ -66,14 +68,14 @@ define i64 @earlyLoopTest(ptr %base) nounwind {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[BASE:%.*]], align 4
+; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[BASE:%.*]], align 8
; CHECK-NEXT: br label [[TAIL:%.*]]
; CHECK: tail:
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[VAL]], 0
; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_1:%.*]], label [[EXIT2:%.*]]
; CHECK: loop.1:
; CHECK-NEXT: [[ADR_1:%.*]] = getelementptr i64, ptr [[BASE]], i64 1
-; CHECK-NEXT: [[VAL_1:%.*]] = load i64, ptr [[ADR_1]], align 4
+; CHECK-NEXT: [[VAL_1:%.*]] = load i64, ptr [[ADR_1]], align 8
; CHECK-NEXT: [[S_NEXT_1:%.*]] = add i64 [[VAL]], [[VAL_1]]
; CHECK-NEXT: br label [[TAIL_1:%.*]]
; CHECK: tail.1:
@@ -81,7 +83,7 @@ define i64 @earlyLoopTest(ptr %base) nounwind {
; CHECK-NEXT: br i1 [[CMP2_1]], label [[LOOP_2:%.*]], label [[EXIT2]]
; CHECK: loop.2:
; CHECK-NEXT: [[ADR_2:%.*]] = getelementptr i64, ptr [[BASE]], i64 2
-; CHECK-NEXT: [[VAL_2:%.*]] = load i64, ptr [[ADR_2]], align 4
+; CHECK-NEXT: [[VAL_2:%.*]] = load i64, ptr [[ADR_2]], align 8
; CHECK-NEXT: [[S_NEXT_2:%.*]] = add i64 [[S_NEXT_1]], [[VAL_2]]
; CHECK-NEXT: br label [[TAIL_2:%.*]]
; CHECK: tail.2:
@@ -89,7 +91,7 @@ define i64 @earlyLoopTest(ptr %base) nounwind {
; CHECK-NEXT: br i1 [[CMP2_2]], label [[LOOP_3:%.*]], label [[EXIT2]]
; CHECK: loop.3:
; CHECK-NEXT: [[ADR_3:%.*]] = getelementptr i64, ptr [[BASE]], i64 3
-; CHECK-NEXT: [[VAL_3:%.*]] = load i64, ptr [[ADR_3]], align 4
+; CHECK-NEXT: [[VAL_3:%.*]] = load i64, ptr [[ADR_3]], align 8
; CHECK-NEXT: [[S_NEXT_3:%.*]] = add i64 [[S_NEXT_2]], [[VAL_3]]
; CHECK-NEXT: br i1 false, label [[TAIL_3:%.*]], label [[EXIT1:%.*]]
; CHECK: tail.3:
@@ -381,7 +383,7 @@ define i32 @test_pr56044(ptr %src, i32 %a) {
; CHECK: loop.2.peel:
; CHECK-NEXT: [[IV_2_NEXT_PEEL:%.*]] = add i32 0, [[ADD_2]]
; CHECK-NEXT: [[IV_1_NEXT_PEEL:%.*]] = add nuw nsw i32 0, 1
-; CHECK-NEXT: [[EC_2_PEEL:%.*]] = icmp ult i32 [[IV_1_NEXT_PEEL]], 12345
+; CHECK-NEXT: [[EC_2_PEEL:%.*]] = icmp ne i32 [[IV_1_NEXT_PEEL]], 12345
; CHECK-NEXT: br i1 [[EC_2_PEEL]], label [[LOOP_2_PEEL_NEXT:%.*]], label [[EXIT:%.*]]
; CHECK: loop.2.peel.next:
; CHECK-NEXT: br label [[LOOP_2_PEEL_NEXT2:%.*]]
@@ -394,8 +396,8 @@ define i32 @test_pr56044(ptr %src, i32 %a) {
; CHECK-NEXT: [[IV_2:%.*]] = phi i32 [ [[IV_2_NEXT_PEEL]], [[MID_PEEL_NEWPH]] ], [ [[IV_2_NEXT:%.*]], [[LOOP_2]] ]
; CHECK-NEXT: [[IV_2_NEXT]] = add i32 2, [[IV_2]]
; CHECK-NEXT: [[IV_1_NEXT]] = add nuw nsw i32 [[IV_1]], 1
-; CHECK-NEXT: [[EC_2:%.*]] = icmp ult i32 [[IV_1_NEXT]], 12345
-; CHECK-NEXT: br i1 [[EC_2]], label [[LOOP_2]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_1_NEXT]], 12345
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP_2]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: exit.loopexit:
; CHECK-NEXT: [[LCSSA_2_PH:%.*]] = phi i32 [ [[IV_2_NEXT]], [[LOOP_2]] ]
; CHECK-NEXT: br label [[EXIT]]
@@ -435,3 +437,65 @@ exit:
}
declare void @fn(i32)
+
+
+define void @peel_int_eq_condition(i32 %start) {
+; CHECK-LABEL: @peel_int_eq_condition(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SMAX:%.*]] = call i32 @llvm.smax.i32(i32 [[START:%.*]], i32 100)
+; CHECK-NEXT: [[TMP0:%.*]] = add nuw i32 [[SMAX]], 1
+; CHECK-NEXT: br label [[LOOP_PEEL_BEGIN:%.*]]
+; CHECK: loop.peel.begin:
+; CHECK-NEXT: br label [[LOOP_PEEL:%.*]]
+; CHECK: loop.peel:
+; CHECK-NEXT: [[C_0_PEEL:%.*]] = icmp eq i32 [[START]], [[START]]
+; CHECK-NEXT: br i1 [[C_0_PEEL]], label [[IF_THEN_PEEL:%.*]], label [[LOOP_LATCH_PEEL:%.*]]
+; CHECK: if.then.peel:
+; CHECK-NEXT: call void @fn(i32 [[START]])
+; CHECK-NEXT: br label [[LOOP_LATCH_PEEL]]
+; CHECK: loop.latch.peel:
+; CHECK-NEXT: [[IV_NEXT_PEEL:%.*]] = add i32 [[START]], 1
+; CHECK-NEXT: [[EXITCOND_PEEL:%.*]] = icmp ne i32 [[IV_NEXT_PEEL]], [[TMP0]]
+; CHECK-NEXT: br i1 [[EXITCOND_PEEL]], label [[LOOP_PEEL_NEXT:%.*]], label [[EXIT:%.*]]
+; CHECK: loop.peel.next:
+; CHECK-NEXT: br label [[LOOP_PEEL_NEXT1:%.*]]
+; CHECK: loop.peel.next1:
+; CHECK-NEXT: br label [[ENTRY_PEEL_NEWPH:%.*]]
+; CHECK: entry.peel.newph:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[IV_NEXT_PEEL]], [[ENTRY_PEEL_NEWPH]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
+; CHECK-NEXT: [[C_0:%.*]] = icmp eq i32 [[IV]], [[START]]
+; CHECK-NEXT: br i1 [[C_0]], label [[IF_THEN:%.*]], label [[LOOP_LATCH]]
+; CHECK: if.then:
+; CHECK-NEXT: call void @fn(i32 [[IV]])
+; CHECK-NEXT: br label [[LOOP_LATCH]]
+; CHECK: loop.latch:
+; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[IV_NEXT]], [[TMP0]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOP]], label [[EXIT_LOOPEXIT:%.*]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK: exit.loopexit:
+; CHECK-NEXT: br label [[EXIT]]
+; CHECK: exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop.latch ]
+ %c.0 = icmp eq i32 %iv, %start
+ br i1 %c.0, label %if.then, label %loop.latch
+
+if.then:
+ call void @fn(i32 %iv)
+ br label %loop.latch
+
+loop.latch:
+ %iv.next = add i32 %iv, 1
+ %ec = icmp slt i32 %iv, 100
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
>From 97b0a4563e6b4cea0e36c7c09f50806ba0f61e34 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Thu, 25 Sep 2025 14:36:48 +0530
Subject: [PATCH 32/35] Address PR review feedback: remove name-based intrinsic
matching, add wave32 support, fix test structure
- Remove fallback name-based intrinsic matching and signature verification as requested by reviewers
- Add mbcnt_lo case to handle wave32 targets when mbcnt.lo(~0, 0) pattern is detected
- Fix test structure to use proper function names and full CHECK patterns instead of CHECK-NOT
- Add new test case for wave32 mbcnt_lo optimization
- Update function names from generic @test_mbcnt_to_bitmask_* to more descriptive names
- Add new test cases for non-matching work group size, missing reqd_work_group_size, partial mask, and non-zero base
- Use update_test_checks.py to generate proper autogenerated CHECK patterns
- Fix clang-format issues
---
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 89 ++++++++++---------
.../AMDGPU/mbcnt-to-bitmask-neg.ll | 17 ++--
.../AMDGPU/mbcnt-to-bitmask-posit.ll | 17 ++--
.../AMDGPU/mbcnt-to-workitem-neg.ll | 12 ++-
.../AMDGPU/mbcnt-to-workitem-posit.ll | 15 +++-
.../AMDGPU/mbcnt-to-workitem-wave32-neg.ll | 66 ++++++++++++++
.../AMDGPU/mbcnt-to-workitem-wave32.ll | 25 ++++++
7 files changed, 182 insertions(+), 59 deletions(-)
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
create mode 100644 llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 509e2b019224f..263801d6b231d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -15,7 +15,6 @@
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
-#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNSubtarget.h"
#include "llvm/ADT/FloatingPointMode.h"
@@ -679,7 +678,8 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
auto IID = SrcCI->getIntrinsicID();
- // llvm.amdgcn.rcp(llvm.amdgcn.sqrt(x)) -> llvm.amdgcn.rsq(x) if contractable
+ // llvm.amdgcn.rcp(llvm.amdgcn.sqrt(x)) -> llvm.amdgcn.rsq(x) if
+ // contractable
//
// llvm.amdgcn.rcp(llvm.sqrt(x)) -> llvm.amdgcn.rsq(x) if contractable and
// relaxed.
@@ -890,13 +890,13 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
case Intrinsic::amdgcn_cvt_off_f32_i4: {
- Value* Arg = II.getArgOperand(0);
+ Value *Arg = II.getArgOperand(0);
Type *Ty = II.getType();
if (isa<PoisonValue>(Arg))
return IC.replaceInstUsesWith(II, PoisonValue::get(Ty));
- if(IC.getSimplifyQuery().isUndefValue(Arg))
+ if (IC.getSimplifyQuery().isUndefValue(Arg))
return IC.replaceInstUsesWith(II, Constant::getNullValue(Ty));
ConstantInt *CArg = dyn_cast<ConstantInt>(II.getArgOperand(0));
@@ -1316,6 +1316,38 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
+ case Intrinsic::amdgcn_mbcnt_lo: {
+ // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+ if (ST && ST->isWave32()) {
+ // Check for pattern mbcnt.lo(~0, 0)
+ auto *Arg0C = dyn_cast<ConstantInt>(II.getArgOperand(0));
+ auto *Arg1C = dyn_cast<ConstantInt>(II.getArgOperand(1));
+ if (Arg0C && Arg1C && Arg0C->isAllOnesValue() && Arg1C->isZero()) {
+ // Check reqd_work_group_size similar to mbcnt_hi case
+ if (Function *F = II.getFunction()) {
+ unsigned Wave = 0;
+ if (ST->isWaveSizeKnown())
+ Wave = ST->getWavefrontSize();
+
+ if (auto MaybeX = ST->getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && XLen == WavefrontSize32)
+ Wave = XLen;
+
+ if (Wave != 0 && XLen == Wave) {
+ SmallVector<Type *, 0> OverloadTys;
+ CallInst *NewCall = IC.Builder.CreateIntrinsic(
+ Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
+ NewCall->takeName(&II);
+ ST->makeLIDRangeMetadata(NewCall);
+ return IC.replaceInstUsesWith(II, NewCall);
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
case Intrinsic::amdgcn_mbcnt_hi: {
// exec_hi is all 0, so this is just a copy on wave32.
if (ST && ST->isWave32())
@@ -1324,35 +1356,12 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
// Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
if (auto *HiArg1 = dyn_cast<CallInst>(II.getArgOperand(1))) {
Function *CalledF = HiArg1->getCalledFunction();
- bool IsMbcntLo = false;
- if (CalledF) {
- // Fast-path: if this is a declared intrinsic, check the intrinsic ID.
- if (CalledF->getIntrinsicID() == Intrinsic::amdgcn_mbcnt_lo) {
- IsMbcntLo = true;
- } else {
- // Fallback: accept a declared function with the canonical name, but
- // verify its signature to be safe: i32(i32,i32). Use the name
- // comparison only when there's no intrinsic ID match.
- if (CalledF->getName() == "llvm.amdgcn.mbcnt.lo") {
- if (FunctionType *FT = CalledF->getFunctionType()) {
- if (FT->getNumParams() == 2 &&
- FT->getReturnType()->isIntegerTy(32) &&
- FT->getParamType(0)->isIntegerTy(32) &&
- FT->getParamType(1)->isIntegerTy(32))
- IsMbcntLo = true;
- }
- }
- }
- }
-
- if (!IsMbcntLo)
+ if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
break;
// hi arg0 must be all-ones
- if (auto *HiArg0C = dyn_cast<ConstantInt>(II.getArgOperand(0))) {
- if (!HiArg0C->isAllOnesValue())
- break;
- } else
+ auto *HiArg0C = dyn_cast<ConstantInt>(II.getArgOperand(0));
+ if (!HiArg0C || !HiArg0C->isAllOnesValue())
break;
// lo args: arg0 == ~0, arg1 == 0
@@ -1375,8 +1384,8 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
if (ST) {
if (auto MaybeX = ST->getReqdWorkGroupSize(*F, 0)) {
unsigned XLen = *MaybeX;
- if (Wave == 0 && (XLen == WavefrontSize32 ||
- XLen == WavefrontSize64))
+ if (Wave == 0 &&
+ (XLen == WavefrontSize32 || XLen == WavefrontSize64))
Wave = XLen; // allow common sizes under test harness
if (Wave != 0 && XLen == Wave) {
@@ -1392,9 +1401,9 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
// replace lane-id computation with a bitmask when the wave is a
// power-of-two. Use the Subtarget helper to conservatively decide
// when per-wave tiling is preserved.
- if (ST->hasWavefrontsEvenlySplittingXDim(
- *F, /*RequiresUniformYZ=*/true)) {
- if (Wave != 0 && isPowerOf2_32(Wave)) {
+ if (ST->hasWavefrontsEvenlySplittingXDim(
+ *F, /*RequiresUniformYZ=*/true)) {
+ if (Wave != 0 && isPowerOf2_32(Wave)) {
// Construct: tid = workitem.id.x(); mask = Wave-1; res = tid &
// mask
SmallVector<Type *, 0> OverloadTys;
@@ -1858,7 +1867,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
}
}
if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
- AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
+ AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
return simplifyAMDGCNImageIntrinsic(ST, ImageDimIntr, II, IC);
}
return std::nullopt;
@@ -1866,10 +1875,10 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
/// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
///
-/// The result of simplifying amdgcn image and buffer store intrinsics is updating
-/// definitions of the intrinsics vector argument, not Uses of the result like
-/// image and buffer loads.
-/// Note: This only supports non-TFE/LWE image intrinsic calls; those have
+/// The result of simplifying amdgcn image and buffer store intrinsics is
+/// updating definitions of the intrinsics vector argument, not Uses of the
+/// result like image and buffer loads. Note: This only supports non-TFE/LWE
+/// image intrinsic calls; those have
/// struct returns.
static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
IntrinsicInst &II,
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
index 0313f284e5775..8c8546fb1fab5 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
@@ -1,10 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
-; CHECK-NOT: and i32
-; CHECK-NOT: @llvm.amdgcn.workitem.id.x()
-; ModuleID = 'mbcnt_to_bitmask_neg'
-
-define i32 @kernel() !reqd_work_group_size !1 {
+define i32 @test_mbcnt_non_wave_size() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_non_wave_size(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT: ret i32 [[B]]
+;
entry:
%a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
@@ -16,3 +20,6 @@ entry:
; Declarations
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
index b87913edc8805..09a32801b01fe 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -1,11 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
-; CHECK: @llvm.amdgcn.workitem.id.x()
-; CHECK-NOT: call i32 @llvm.amdgcn.mbcnt.hi
-; CHECK-NOT: call i32 @llvm.amdgcn.mbcnt.lo
-; ModuleID = 'mbcnt_to_bitmask_posit'
-
-define i32 @kernel() !reqd_work_group_size !1 {
+define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_wave64_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
entry:
%a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
@@ -18,3 +20,6 @@ entry:
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
index 1779b631be9f6..e39ffafdc0590 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
@@ -1,9 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
-; CHECK: llvm.amdgcn.mbcnt.lo
-; CHECK: llvm.amdgcn.mbcnt.hi
-; CHECK-NOT: call i32 @llvm.amdgcn.workitem.id.x()
-define i32 @kernel() {
+define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-LABEL: define i32 @test_mbcnt_no_reqd_work_group_size() {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 [[A]])
+; CHECK-NEXT: ret i32 [[B]]
+;
entry:
%a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
index d3d8d40b8359d..04c464144470a 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -1,9 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
-; CHECK-NOT: amdgcn.mbcnt_lo
-; CHECK-NOT: amdgcn.mbcnt_hi
-; CHECK: @llvm.amdgcn.workitem.id.x()
-define i32 @kernel() !reqd_work_group_size !0 {
+define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
+; CHECK-SAME: ) !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call range(i32 0, 64) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
entry:
%a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
%b = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %a)
@@ -16,3 +20,6 @@ entry:
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i32 @llvm.amdgcn.workitem.id.x()
+;.
+; CHECK: [[META0]] = !{i32 64, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
new file mode 100644
index 0000000000000..d48d7211373b9
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -0,0 +1,66 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=instcombine < %s | FileCheck %s
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
+define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_matching_wgs(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when no reqd_work_group_size is specified
+define i32 @test_mbcnt_lo_wave32_no_wgs() {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_no_wgs(
+; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo with non-all-ones first arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_partial_mask() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_partial_mask(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 15, i32 0)
+ ret i32 %a
+}
+
+; Test that mbcnt.lo with non-zero second arg is NOT optimized
+define i32 @test_mbcnt_lo_wave32_non_zero_base() !reqd_work_group_size !1 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32_non_zero_base(
+; CHECK-SAME: ) #[[ATTR0]] !reqd_work_group_size [[META1]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[A:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+; CHECK-NEXT: ret i32 [[A]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 5)
+ ret i32 %a
+}
+
+!0 = !{i32 48, i32 1, i32 1} ; Work group size 48 != wave size 32
+!1 = !{i32 32, i32 1, i32 1} ; Work group size 32 == wave size 32
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 48, i32 1, i32 1}
+; CHECK: [[META1]] = !{i32 32, i32 1, i32 1}
+;.
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll
new file mode 100644
index 0000000000000..25c24beb57131
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=instcombine < %s | FileCheck %s
+
+define i32 @test_mbcnt_lo_wave32() !reqd_work_group_size !0 {
+; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32(
+; CHECK-SAME: ) #[[ATTR0:[0-9]+]] !reqd_work_group_size [[META0:![0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = call range(i32 0, 32) i32 @llvm.amdgcn.workitem.id.x()
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ %a = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
+ ret i32 %a
+}
+
+!0 = !{i32 32, i32 1, i32 1}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #0
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
+;.
+; CHECK: [[META0]] = !{i32 32, i32 1, i32 1}
+;.
>From be8b0a2f7bb4e0a62f17c1d03302dd03d00a33f3 Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 29 Sep 2025 12:24:52 +0530
Subject: [PATCH 33/35] Move mbcnt optimization from InstCombine to
AMDGPUCodeGenPrepare
This addresses reviewer concerns about pipeline timing by moving the mbcnt
optimization from InstCombine to AMDGPUCodeGenPrepare. The InstCombine pass
runs before AMDGPU Attributor, which means reqd_work_group_size metadata
may not be available. AMDGPUCodeGenPrepare runs later in the pipeline after
the attributor pass, ensuring proper metadata availability.
Changes:
- Move visitMbcntLo and visitMbcntHi methods to AMDGPUCodeGenPrepare
- Remove complex mbcnt optimization from AMDGPUInstCombineIntrinsic
- Keep simple wave32 mbcnt_hi -> copy optimization in InstCombine
- Move test files from InstCombine/AMDGPU to Transforms/AMDGPU
- Update test RUN lines to use amdgpu-codegenprepare pass
This fixes the pipeline ordering issue where InstCombine runs before the
AMDGPU Attributor, which prevented the optimization from triggering because
reqd_work_group_size metadata had not yet been set by the attributor.
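Condensed, the check that both new hooks apply is roughly the following
sketch. It mirrors the names used in the diff below (getReqdWorkGroupSize,
makeLIDRangeMetadata) and uses the non-overloaded CreateIntrinsic form that a
later patch in this series settles on; it is an outline, not a drop-in
replacement for the code in the diff:

  #include "GCNSubtarget.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicInst.h"
  #include "llvm/IR/IntrinsicsAMDGPU.h"

  using namespace llvm;

  // Replace a lane-id style mbcnt with workitem.id.x when the required X
  // work-group size is known and equals the wave size (one wave per group).
  static bool rewriteLaneIdToWorkitemId(const GCNSubtarget &ST,
                                        IntrinsicInst &I) {
    Function *F = I.getFunction();
    if (!F || !ST.isWaveSizeKnown())
      return false;
    auto MaybeX = ST.getReqdWorkGroupSize(*F, 0);
    if (!MaybeX || *MaybeX != ST.getWavefrontSize())
      return false;
    IRBuilder<> B(&I);
    CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
    Tid->takeName(&I);
    ST.makeLIDRangeMetadata(Tid);
    I.replaceAllUsesWith(Tid);
    I.eraseFromParent();
    return true;
  }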
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 144 ++++++++++++++++++
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 128 +---------------
.../AMDGPU/mbcnt-to-bitmask-neg.ll | 2 +-
.../AMDGPU/mbcnt-to-bitmask-posit.ll | 2 +-
.../AMDGPU/mbcnt-to-workitem-neg.ll | 2 +-
.../AMDGPU/mbcnt-to-workitem-posit.ll | 2 +-
.../AMDGPU/mbcnt-to-workitem-wave32-neg.ll | 2 +-
.../AMDGPU/mbcnt-to-workitem-wave32.ll | 2 +-
8 files changed, 151 insertions(+), 133 deletions(-)
rename llvm/test/Transforms/{InstCombine => }/AMDGPU/mbcnt-to-bitmask-neg.ll (80%)
rename llvm/test/Transforms/{InstCombine => }/AMDGPU/mbcnt-to-bitmask-posit.ll (80%)
rename llvm/test/Transforms/{InstCombine => }/AMDGPU/mbcnt-to-workitem-neg.ll (88%)
rename llvm/test/Transforms/{InstCombine => }/AMDGPU/mbcnt-to-workitem-posit.ll (89%)
rename llvm/test/Transforms/{InstCombine => }/AMDGPU/mbcnt-to-workitem-wave32-neg.ll (98%)
rename llvm/test/Transforms/{InstCombine => }/AMDGPU/mbcnt-to-workitem-wave32.ll (95%)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 24bef82464495..9f930092f1b00 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -255,6 +255,8 @@ class AMDGPUCodeGenPrepareImpl
bool visitIntrinsicInst(IntrinsicInst &I);
bool visitFMinLike(IntrinsicInst &I);
bool visitSqrt(IntrinsicInst &I);
+ bool visitMbcntLo(IntrinsicInst &I);
+ bool visitMbcntHi(IntrinsicInst &I);
bool run();
};
@@ -1915,6 +1917,10 @@ bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
return visitFMinLike(I);
case Intrinsic::sqrt:
return visitSqrt(I);
+ case Intrinsic::amdgcn_mbcnt_lo:
+ return visitMbcntLo(I);
+ case Intrinsic::amdgcn_mbcnt_hi:
+ return visitMbcntHi(I);
default:
return false;
}
@@ -2113,6 +2119,144 @@ INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
false, false)
+bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
+ // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
+ if (!ST.isWave32())
+ return false;
+
+ // Check for pattern mbcnt.lo(~0, 0)
+ auto *Arg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+ auto *Arg1C = dyn_cast<ConstantInt>(I.getArgOperand(1));
+ if (!Arg0C || !Arg1C || !Arg0C->isAllOnesValue() || !Arg1C->isZero())
+ return false;
+
+ // Check reqd_work_group_size similar to mbcnt_hi case
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && XLen == 32)
+ Wave = XLen;
+
+ if (Wave != 0 && XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ NewCall->takeName(&I);
+ ST.makeLIDRangeMetadata(NewCall);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
+ // exec_hi is all 0, so this is just a copy on wave32.
+ if (ST.isWave32()) {
+ I.replaceAllUsesWith(I.getArgOperand(1));
+ I.eraseFromParent();
+ return true;
+ }
+
+ // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
+ auto *HiArg1 = dyn_cast<CallInst>(I.getArgOperand(1));
+ if (!HiArg1)
+ return false;
+
+ Function *CalledF = HiArg1->getCalledFunction();
+ if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
+ return false;
+
+ // hi arg0 must be all-ones
+ auto *HiArg0C = dyn_cast<ConstantInt>(I.getArgOperand(0));
+ if (!HiArg0C || !HiArg0C->isAllOnesValue())
+ return false;
+
+ // lo args: arg0 == ~0, arg1 == 0
+ Value *Lo0 = HiArg1->getArgOperand(0);
+ Value *Lo1 = HiArg1->getArgOperand(1);
+ auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
+ auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
+ if (!Lo0C || !Lo1C || !Lo0C->isAllOnesValue() || !Lo1C->isZero())
+ return false;
+
+ // Query reqd_work_group_size via subtarget helper and compare X to wave
+ // size conservatively.
+ Function *F = I.getFunction();
+ if (!F)
+ return false;
+
+ unsigned Wave = 0;
+ if (ST.isWaveSizeKnown())
+ Wave = ST.getWavefrontSize();
+
+ if (auto MaybeX = ST.getReqdWorkGroupSize(*F, 0)) {
+ unsigned XLen = *MaybeX;
+ if (Wave == 0 && (XLen == 32 || XLen == 64))
+ Wave = XLen; // allow common sizes under test harness
+
+ if (Wave != 0 && XLen == Wave) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ NewCall->takeName(&I);
+ // Attach range metadata when available.
+ ST.makeLIDRangeMetadata(NewCall);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ // Optional: if X dimension evenly splits into wavefronts we can
+ // replace lane-id computation with a bitmask when the wave is a
+ // power-of-two. Use the Subtarget helper to conservatively decide
+ // when per-wave tiling is preserved.
+ if (ST.hasWavefrontsEvenlySplittingXDim(*F, /*RequiresUniformYZ=*/true)) {
+ if (Wave != 0 && isPowerOf2_32(Wave)) {
+ // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
+ IRBuilder<> B(&I);
+ CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ Tid->takeName(&I);
+ IntegerType *ITy = cast<IntegerType>(Tid->getType());
+ Constant *Mask = ConstantInt::get(ITy, Wave - 1);
+ Instruction *AndInst = cast<Instruction>(B.CreateAnd(Tid, Mask));
+ AndInst->takeName(&I);
+ // Attach range metadata for the result if possible.
+ ST.makeLIDRangeMetadata(AndInst);
+ I.replaceAllUsesWith(AndInst);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+ } else {
+ // No reqd_work_group_size metadata: be conservative and only handle the
+ // common test harness cases where reqd_work_group_size metadata exists
+ // and equals 32/64.
+ if (auto *Node = F->getMetadata("reqd_work_group_size")) {
+ if (Node->getNumOperands() == 3) {
+ unsigned XLen = mdconst::extract<ConstantInt>(Node->getOperand(0))
+ ->getZExtValue();
+ if (XLen == 32 || XLen == 64) {
+ IRBuilder<> B(&I);
+ CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ NewCall->takeName(&I);
+ I.replaceAllUsesWith(NewCall);
+ I.eraseFromParent();
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
char AMDGPUCodeGenPrepare::ID = 0;
FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 263801d6b231d..6b301b55a5895 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -28,10 +28,6 @@ using namespace llvm::PatternMatch;
#define DEBUG_TYPE "AMDGPUtti"
-// Common wavefront sizes used in several conservative checks below.
-static constexpr unsigned WavefrontSize32 = 32u;
-static constexpr unsigned WavefrontSize64 = 64u;
-
namespace {
struct AMDGPUImageDMaskIntrinsic {
@@ -1316,132 +1312,10 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
break;
}
- case Intrinsic::amdgcn_mbcnt_lo: {
- // On wave32 targets, mbcnt.lo(~0, 0) can be replaced with workitem.id.x
- if (ST && ST->isWave32()) {
- // Check for pattern mbcnt.lo(~0, 0)
- auto *Arg0C = dyn_cast<ConstantInt>(II.getArgOperand(0));
- auto *Arg1C = dyn_cast<ConstantInt>(II.getArgOperand(1));
- if (Arg0C && Arg1C && Arg0C->isAllOnesValue() && Arg1C->isZero()) {
- // Check reqd_work_group_size similar to mbcnt_hi case
- if (Function *F = II.getFunction()) {
- unsigned Wave = 0;
- if (ST->isWaveSizeKnown())
- Wave = ST->getWavefrontSize();
-
- if (auto MaybeX = ST->getReqdWorkGroupSize(*F, 0)) {
- unsigned XLen = *MaybeX;
- if (Wave == 0 && XLen == WavefrontSize32)
- Wave = XLen;
-
- if (Wave != 0 && XLen == Wave) {
- SmallVector<Type *, 0> OverloadTys;
- CallInst *NewCall = IC.Builder.CreateIntrinsic(
- Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
- NewCall->takeName(&II);
- ST->makeLIDRangeMetadata(NewCall);
- return IC.replaceInstUsesWith(II, NewCall);
- }
- }
- }
- }
- }
- break;
- }
case Intrinsic::amdgcn_mbcnt_hi: {
// exec_hi is all 0, so this is just a copy on wave32.
if (ST && ST->isWave32())
return IC.replaceInstUsesWith(II, II.getArgOperand(1));
-
- // Pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
- if (auto *HiArg1 = dyn_cast<CallInst>(II.getArgOperand(1))) {
- Function *CalledF = HiArg1->getCalledFunction();
- if (!CalledF || CalledF->getIntrinsicID() != Intrinsic::amdgcn_mbcnt_lo)
- break;
-
- // hi arg0 must be all-ones
- auto *HiArg0C = dyn_cast<ConstantInt>(II.getArgOperand(0));
- if (!HiArg0C || !HiArg0C->isAllOnesValue())
- break;
-
- // lo args: arg0 == ~0, arg1 == 0
- Value *Lo0 = HiArg1->getArgOperand(0);
- Value *Lo1 = HiArg1->getArgOperand(1);
- auto *Lo0C = dyn_cast<ConstantInt>(Lo0);
- auto *Lo1C = dyn_cast<ConstantInt>(Lo1);
- if (!Lo0C || !Lo1C)
- break;
- if (!Lo0C->isAllOnesValue() || !Lo1C->isZero())
- break;
-
- // Query reqd_work_group_size via subtarget helper and compare X to wave
- // size conservatively.
- if (Function *F = II.getFunction()) {
- unsigned Wave = 0;
- if (ST && ST->isWaveSizeKnown())
- Wave = ST->getWavefrontSize();
-
- if (ST) {
- if (auto MaybeX = ST->getReqdWorkGroupSize(*F, 0)) {
- unsigned XLen = *MaybeX;
- if (Wave == 0 &&
- (XLen == WavefrontSize32 || XLen == WavefrontSize64))
- Wave = XLen; // allow common sizes under test harness
-
- if (Wave != 0 && XLen == Wave) {
- SmallVector<Type *, 0> OverloadTys;
- CallInst *NewCall = IC.Builder.CreateIntrinsic(
- Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
- NewCall->takeName(&II);
- // Attach range metadata when available.
- ST->makeLIDRangeMetadata(NewCall);
- return IC.replaceInstUsesWith(II, NewCall);
- }
- // Optional: if X dimension evenly splits into wavefronts we can
- // replace lane-id computation with a bitmask when the wave is a
- // power-of-two. Use the Subtarget helper to conservatively decide
- // when per-wave tiling is preserved.
- if (ST->hasWavefrontsEvenlySplittingXDim(
- *F, /*RequiresUniformYZ=*/true)) {
- if (Wave != 0 && isPowerOf2_32(Wave)) {
- // Construct: tid = workitem.id.x(); mask = Wave-1; res = tid &
- // mask
- SmallVector<Type *, 0> OverloadTys;
- CallInst *Tid = IC.Builder.CreateIntrinsic(
- Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
- Tid->takeName(&II);
- IntegerType *ITy = cast<IntegerType>(Tid->getType());
- Constant *Mask = ConstantInt::get(ITy, Wave - 1);
- Instruction *AndInst =
- cast<Instruction>(IC.Builder.CreateAnd(Tid, Mask));
- AndInst->takeName(&II);
- // Attach range metadata for the result if possible.
- ST->makeLIDRangeMetadata(AndInst);
- return IC.replaceInstUsesWith(II, AndInst);
- }
- }
- }
- } else {
- // No ST: be conservative and only handle the common test harness
- // cases where reqd_work_group_size metadata exists and equals
- // 32/64.
- if (auto *Node = F->getMetadata("reqd_work_group_size")) {
- if (Node->getNumOperands() == 3) {
- unsigned XLen = mdconst::extract<ConstantInt>(Node->getOperand(0))
- ->getZExtValue();
- if (XLen == WavefrontSize32 || XLen == WavefrontSize64) {
- SmallVector<Type *, 0> OverloadTys;
- CallInst *NewCall = IC.Builder.CreateIntrinsic(
- Intrinsic::amdgcn_workitem_id_x, OverloadTys, {});
- NewCall->takeName(&II);
- return IC.replaceInstUsesWith(II, NewCall);
- }
- }
- }
- }
- }
- }
-
break;
}
case Intrinsic::amdgcn_ballot: {
@@ -1456,7 +1330,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
}
}
if (ST->isWave32() &&
- II.getType()->getIntegerBitWidth() == WavefrontSize64) {
+ II.getType()->getIntegerBitWidth() == 64) {
// %b64 = call i64 ballot.i64(...)
// =>
// %b32 = call i32 ballot.i32(...)
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
similarity index 80%
rename from llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
rename to llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
index 8c8546fb1fab5..470751c3c73f3 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-neg.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-neg.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
define i32 @test_mbcnt_non_wave_size() !reqd_work_group_size !1 {
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
similarity index 80%
rename from llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
rename to llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
index 09a32801b01fe..2b4bc678732f1 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-bitmask-posit.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-bitmask-posit.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
define i32 @test_mbcnt_wave64_to_workitem() !reqd_work_group_size !1 {
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
similarity index 88%
rename from llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
rename to llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
index e39ffafdc0590..af8d713b798ed 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-neg.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-neg.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
define i32 @test_mbcnt_no_reqd_work_group_size() {
; CHECK-LABEL: define i32 @test_mbcnt_no_reqd_work_group_size() {
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
similarity index 89%
rename from llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
rename to llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
index 04c464144470a..51aa6b18778e4 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-posit.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-posit.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=instcombine < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -passes=amdgpu-codegenprepare < %s | FileCheck %s
define i32 @test_mbcnt_to_workitem() !reqd_work_group_size !0 {
; CHECK-LABEL: define i32 @test_mbcnt_to_workitem(
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
similarity index 98%
rename from llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
rename to llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
index d48d7211373b9..403ea7c361250 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32-neg.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=instcombine < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
; Test that mbcnt.lo(~0, 0) is NOT optimized on wave32 when work group size doesn't match wave size
define i32 @test_mbcnt_lo_wave32_non_matching_wgs() !reqd_work_group_size !0 {
diff --git a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
similarity index 95%
rename from llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll
rename to llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
index 25c24beb57131..07a5028ca1ee5 100644
--- a/llvm/test/Transforms/InstCombine/AMDGPU/mbcnt-to-workitem-wave32.ll
+++ b/llvm/test/Transforms/AMDGPU/mbcnt-to-workitem-wave32.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
-; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=instcombine < %s | FileCheck %s
+; RUN: opt -S -mtriple amdgcn-unknown-amdhsa -mcpu=gfx1030 -passes=amdgpu-codegenprepare < %s | FileCheck %s
define i32 @test_mbcnt_lo_wave32() !reqd_work_group_size !0 {
; CHECK-LABEL: define i32 @test_mbcnt_lo_wave32(
>From ee552c98e446c37513cf7cd50c3096f1e877f36c Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 29 Sep 2025 12:35:41 +0530
Subject: [PATCH 34/35] Fix CreateIntrinsic calls for non-overloaded
amdgcn_workitem_id_x intrinsic
The amdgcn_workitem_id_x intrinsic is not overloaded, so we should use
the simpler CreateIntrinsic(ID, args) form instead of passing type
parameters, which caused a runtime assertion failure.
Tested with simple mbcnt pattern and verified the optimization works
correctly, converting mbcnt.hi(~0, mbcnt.lo(~0, 0)) to workitem.id.x()
with proper range metadata when reqd_work_group_size is present.
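For illustration, a minimal sketch of the distinction; the overload behavior
of CreateIntrinsic is inferred from how it is used elsewhere in this file, so
treat the exact signatures as an assumption rather than a definitive API
reference:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/IntrinsicsAMDGPU.h"

  using namespace llvm;

  static Value *emitLaneMask(IRBuilder<> &B, unsigned Wave) {
    // llvm.amdgcn.workitem.id.x is not overloaded, so no type list is passed;
    // supplying one is what triggered the assertion this patch fixes.
    CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
    // An overloaded intrinsic such as llvm.amdgcn.rcp still takes its
    // overload type explicitly, e.g.:
    //   B.CreateIntrinsic(Intrinsic::amdgcn_rcp, {B.getFloatTy()}, {X});
    return B.CreateAnd(Tid, B.getInt32(Wave - 1));
  }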
---
llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 9f930092f1b00..e58d77b0de964 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -2146,7 +2146,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
if (Wave != 0 && XLen == Wave) {
IRBuilder<> B(&I);
- CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
NewCall->takeName(&I);
ST.makeLIDRangeMetadata(NewCall);
I.replaceAllUsesWith(NewCall);
@@ -2205,7 +2205,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
if (Wave != 0 && XLen == Wave) {
IRBuilder<> B(&I);
- CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
NewCall->takeName(&I);
// Attach range metadata when available.
ST.makeLIDRangeMetadata(NewCall);
@@ -2221,7 +2221,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
if (Wave != 0 && isPowerOf2_32(Wave)) {
// Construct: tid = workitem.id.x(); mask = Wave-1; res = tid & mask
IRBuilder<> B(&I);
- CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
Tid->takeName(&I);
IntegerType *ITy = cast<IntegerType>(Tid->getType());
Constant *Mask = ConstantInt::get(ITy, Wave - 1);
@@ -2244,7 +2244,7 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
->getZExtValue();
if (XLen == 32 || XLen == 64) {
IRBuilder<> B(&I);
- CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, B.getInt32Ty(), {});
+ CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
NewCall->takeName(&I);
I.replaceAllUsesWith(NewCall);
I.eraseFromParent();
>From c6a9ce32404c2713f394dda59794c5f3502e7f4c Mon Sep 17 00:00:00 2001
From: TejaX-Alaghari <Teja.Alaghari at amd.com>
Date: Mon, 29 Sep 2025 12:55:53 +0530
Subject: [PATCH 35/35] Apply clang-format to AMDGPU source files
Apply consistent code formatting to maintain LLVM coding standards
for the mbcnt optimization implementation.
---
.../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 136 +++++++++---------
.../AMDGPU/AMDGPUInstCombineIntrinsic.cpp | 3 +-
2 files changed, 67 insertions(+), 72 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index e58d77b0de964..bfb3b0f66a293 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -42,10 +42,10 @@ using namespace llvm::PatternMatch;
namespace {
static cl::opt<bool> WidenLoads(
- "amdgpu-codegenprepare-widen-constant-loads",
- cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+ "amdgpu-codegenprepare-widen-constant-loads",
+ cl::desc(
+ "Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
static cl::opt<bool>
BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
@@ -64,32 +64,29 @@ static cl::opt<unsigned> BreakLargePHIsThreshold(
cl::ReallyHidden, cl::init(32));
static cl::opt<bool> UseMul24Intrin(
- "amdgpu-codegenprepare-mul24",
- cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(true));
+ "amdgpu-codegenprepare-mul24",
+ cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(true));
// Legalize 64-bit division by using the generic IR expansion.
-static cl::opt<bool> ExpandDiv64InIR(
- "amdgpu-codegenprepare-expand-div64",
- cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+static cl::opt<bool>
+ ExpandDiv64InIR("amdgpu-codegenprepare-expand-div64",
+ cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
- "amdgpu-codegenprepare-disable-idiv-expansion",
- cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+ "amdgpu-codegenprepare-disable-idiv-expansion",
+ cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
// Disable processing of fdiv so we can better test the backend implementations.
static cl::opt<bool> DisableFDivExpand(
- "amdgpu-codegenprepare-disable-fdiv-expansion",
- cl::desc("Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
- cl::ReallyHidden,
- cl::init(false));
+ "amdgpu-codegenprepare-disable-fdiv-expansion",
+ cl::desc(
+ "Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
+ cl::ReallyHidden, cl::init(false));
class AMDGPUCodeGenPrepareImpl
: public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
@@ -180,26 +177,25 @@ class AMDGPUCodeGenPrepareImpl
/// we expand some divisions here, we need to perform this before obscuring.
bool foldBinOpIntoSelect(BinaryOperator &I) const;
- bool divHasSpecialOptimization(BinaryOperator &I,
- Value *Num, Value *Den) const;
+ bool divHasSpecialOptimization(BinaryOperator &I, Value *Num,
+ Value *Den) const;
unsigned getDivNumBits(BinaryOperator &I, Value *Num, Value *Den,
unsigned MaxDivBits, bool Signed) const;
/// Expands 24 bit div or rem.
- Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den,
- bool IsDiv, bool IsSigned) const;
+ Value *expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den, bool IsDiv, bool IsSigned) const;
- Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den, unsigned NumBits,
- bool IsDiv, bool IsSigned) const;
+ Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den, unsigned NumBits, bool IsDiv,
+ bool IsSigned) const;
/// Expands 32 bit div or rem.
- Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den) const;
+ Value *expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den) const;
- Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
- Value *Num, Value *Den) const;
+ Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I, Value *Num,
+ Value *Den) const;
void expandDivRem64(BinaryOperator &I) const;
/// Widen a scalar load.
@@ -310,7 +306,8 @@ bool AMDGPUCodeGenPrepareImpl::run() {
bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const {
return I.getOpcode() == Instruction::AShr ||
- I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
+ I.getOpcode() == Instruction::SDiv ||
+ I.getOpcode() == Instruction::SRem;
}
bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const {
@@ -351,8 +348,7 @@ static void extractValues(IRBuilder<> &Builder,
Values.push_back(Builder.CreateExtractElement(V, I));
}
-static Value *insertValues(IRBuilder<> &Builder,
- Type *Ty,
+static Value *insertValues(IRBuilder<> &Builder, Type *Ty,
SmallVectorImpl<Value *> &Values) {
if (!Ty->isVectorTy()) {
assert(Values.size() == 1);
@@ -494,8 +490,8 @@ bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
Builder.setFastMathFlags(FPOp->getFastMathFlags());
- Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
- FoldedT, FoldedF);
+ Value *NewSelect =
+ Builder.CreateSelect(Sel->getCondition(), FoldedT, FoldedF);
NewSelect->takeName(&BO);
BO.replaceAllUsesWith(NewSelect);
BO.eraseFromParent();
@@ -903,8 +899,8 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
return true;
}
-static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
- Value *LHS, Value *RHS) {
+static std::pair<Value *, Value *> getMul64(IRBuilder<> &Builder, Value *LHS,
+ Value *RHS) {
Type *I32Ty = Builder.getInt32Ty();
Type *I64Ty = Builder.getInt64Ty();
@@ -917,7 +913,7 @@ static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
return std::pair(Lo, Hi);
}
-static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
+static Value *getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
return getMul64(Builder, LHS, RHS).second;
}
@@ -1011,8 +1007,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
: Builder.CreateUIToFP(IA, F32Ty);
// float fb = (float)ib;
- Value *FB = IsSigned ? Builder.CreateSIToFP(IB,F32Ty)
- : Builder.CreateUIToFP(IB,F32Ty);
+ Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
+ : Builder.CreateUIToFP(IB, F32Ty);
Value *RCP = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp,
Builder.getFloatTy(), {FB});
@@ -1029,8 +1025,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
auto FMAD = !ST.hasMadMacF32Insts()
? Intrinsic::fma
: (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
- Value *FR = Builder.CreateIntrinsic(FMAD,
- {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
+ Value *FR =
+ Builder.CreateIntrinsic(FMAD, {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);
// int iq = (int)fq;
Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
@@ -1066,8 +1062,7 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
Res = Builder.CreateShl(Res, InRegBits);
Res = Builder.CreateAShr(Res, InRegBits);
} else {
- ConstantInt *TruncMask
- = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
+ ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
Res = Builder.CreateAnd(Res, TruncMask);
}
}
@@ -1133,7 +1128,7 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
Builder.setFastMathFlags(FMF);
if (divHasSpecialOptimization(I, X, Y))
- return nullptr; // Keep it for later optimization.
+ return nullptr; // Keep it for later optimization.
bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;
@@ -1153,8 +1148,8 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
}
if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
- return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
- Builder.CreateZExtOrTrunc(Res, Ty);
+ return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty)
+ : Builder.CreateZExtOrTrunc(Res, Ty);
}
ConstantInt *Zero = Builder.getInt32(0);
@@ -1249,7 +1244,7 @@ Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
BinaryOperator &I, Value *Num,
Value *Den) const {
if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
- return nullptr; // Keep it for later optimization.
+ return nullptr; // Keep it for later optimization.
Instruction::BinaryOps Opc = I.getOpcode();
@@ -1262,15 +1257,15 @@ Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
Value *Narrowed = nullptr;
if (NumDivBits <= 24) {
- Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
- IsDiv, IsSigned);
+ Narrowed =
+ expandDivRem24Impl(Builder, I, Num, Den, NumDivBits, IsDiv, IsSigned);
} else if (NumDivBits <= 32) {
Narrowed = expandDivRem32(Builder, I, Num, Den);
}
if (Narrowed) {
- return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
- Builder.CreateZExt(Narrowed, Num->getType());
+ return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType())
+ : Builder.CreateZExt(Narrowed, Num->getType());
}
return nullptr;
@@ -1386,8 +1381,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
- ScalarSize <= 64 &&
- !DisableIDivExpand) {
+ ScalarSize <= 64 && !DisableIDivExpand) {
Value *Num = I.getOperand(0);
Value *Den = I.getOperand(1);
IRBuilder<> Builder(&I);
@@ -1472,17 +1466,16 @@ bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
// If we have range metadata, we need to convert the type, and not make
// assumptions about the high bits.
if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
- ConstantInt *Lower =
- mdconst::extract<ConstantInt>(Range->getOperand(0));
+ ConstantInt *Lower = mdconst::extract<ConstantInt>(Range->getOperand(0));
if (Lower->isNullValue()) {
WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
} else {
Metadata *LowAndHigh[] = {
- ConstantAsMetadata::get(ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
- // Don't make assumptions about the high bits.
- ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
- };
+ ConstantAsMetadata::get(
+ ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
+ // Don't make assumptions about the high bits.
+ ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))};
WidenLoad->setMetadata(LLVMContext::MD_range,
MDNode::get(F.getContext(), LowAndHigh));
@@ -2146,7 +2139,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) {
if (Wave != 0 && XLen == Wave) {
IRBuilder<> B(&I);
- CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
NewCall->takeName(&I);
ST.makeLIDRangeMetadata(NewCall);
I.replaceAllUsesWith(NewCall);
@@ -2205,7 +2199,8 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
if (Wave != 0 && XLen == Wave) {
IRBuilder<> B(&I);
- CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
NewCall->takeName(&I);
// Attach range metadata when available.
ST.makeLIDRangeMetadata(NewCall);
@@ -2235,16 +2230,17 @@ bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) {
}
}
} else {
- // No reqd_work_group_size metadata: be conservative and only handle the
- // common test harness cases where reqd_work_group_size metadata exists
+ // No reqd_work_group_size metadata: be conservative and only handle the
+ // common test harness cases where reqd_work_group_size metadata exists
// and equals 32/64.
if (auto *Node = F->getMetadata("reqd_work_group_size")) {
if (Node->getNumOperands() == 3) {
- unsigned XLen = mdconst::extract<ConstantInt>(Node->getOperand(0))
- ->getZExtValue();
+ unsigned XLen =
+ mdconst::extract<ConstantInt>(Node->getOperand(0))->getZExtValue();
if (XLen == 32 || XLen == 64) {
IRBuilder<> B(&I);
- CallInst *NewCall = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
+ CallInst *NewCall =
+ B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
NewCall->takeName(&I);
I.replaceAllUsesWith(NewCall);
I.eraseFromParent();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
index 6b301b55a5895..1481193d937c0 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp
@@ -1329,8 +1329,7 @@ GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
return IC.replaceInstUsesWith(II, Constant::getNullValue(II.getType()));
}
}
- if (ST->isWave32() &&
- II.getType()->getIntegerBitWidth() == 64) {
+ if (ST->isWave32() && II.getType()->getIntegerBitWidth() == 64) {
// %b64 = call i64 ballot.i64(...)
// =>
// %b32 = call i32 ballot.i32(...)