[llvm] 95c2d01 - [FPEnv][RISCV] Correct strictfp tests.

Kevin P. Neal via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 20 07:18:18 PDT 2023


Author: Kevin P. Neal
Date: 2023-07-20T10:16:56-04:00
New Revision: 95c2d01dfedc2bc97e1264d212da39154da015d1

URL: https://github.com/llvm/llvm-project/commit/95c2d01dfedc2bc97e1264d212da39154da015d1
DIFF: https://github.com/llvm/llvm-project/commit/95c2d01dfedc2bc97e1264d212da39154da015d1.diff

LOG: [FPEnv][RISCV] Correct strictfp tests.

Correct RISC-V strictfp tests to follow the rules documented in the LangRef:
https://llvm.org/docs/LangRef.html#constrained-floating-point-intrinsics

Mostly these tests just needed the strictfp attribute on their function
definitions. I've also removed the strictfp attribute from calls to the
constrained intrinsics, since D154991 made it implied by default there, but
I only did this in tests I was changing anyway.
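
As an illustration, the corrected pattern looks like this (a minimal sketch
using one of the intrinsics touched here; @example is a hypothetical function,
not a test copied verbatim): the attribute sits on the function definition,
and the call site no longer carries it.

; Hypothetical function for illustration; mirrors the tests below.
define double @example(i32 %a) nounwind strictfp {
  ; strictfp is implied on the constrained intrinsic call itself since D154991.
  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret double %1
}
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)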

Test changes verified with D146845.

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/double-convert-strict.ll
    llvm/test/CodeGen/RISCV/float-convert-strict.ll
    llvm/test/CodeGen/RISCV/half-convert-strict.ll
    llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll
    llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
    llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/double-convert-strict.ll b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
index d11d55de968635..adbe2c9e9754d2 100644
--- a/llvm/test/CodeGen/RISCV/double-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert-strict.ll
@@ -147,7 +147,7 @@ define i32 @fcvt_w_d(double %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
@@ -193,14 +193,14 @@ define i32 @fcvt_wu_d(double %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
 
 ; Test where the fptoui has multiple uses, one of which causes a sext to be
 ; inserted on RV64.
-define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
+define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind strictfp {
 ; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
 ; CHECKIFD:       # %bb.0:
 ; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
@@ -249,7 +249,7 @@ define i32 @fcvt_wu_d_multiple_use(double %x, ptr %y) nounwind {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict") strictfp
+  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %x, metadata !"fpexcept.strict")
   %b = icmp eq i32 %a, 0
   %c = select i1 %b, i32 1, i32 %a
   ret i32 %c
@@ -295,7 +295,7 @@ define double @fcvt_d_w(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
@@ -345,7 +345,7 @@ define double @fcvt_d_w_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %a = load i32, ptr %p
-  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 
@@ -389,7 +389,7 @@ define double @fcvt_d_wu(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
@@ -445,7 +445,7 @@ define double @fcvt_d_wu_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %a = load i32, ptr %p
-  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 
@@ -495,7 +495,7 @@ define i64 @fcvt_l_d(double %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %a, metadata !"fpexcept.strict")
   ret i64 %1
 }
 declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
@@ -546,7 +546,7 @@ define i64 @fcvt_lu_d(double %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %a, metadata !"fpexcept.strict")
   ret i64 %1
 }
 declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
@@ -597,7 +597,7 @@ define double @fcvt_d_l(i64 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
@@ -648,7 +648,7 @@ define double @fcvt_d_lu(i64 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
@@ -692,7 +692,7 @@ define double @fcvt_d_w_i8(i8 signext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.sitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.sitofp.f64.i8(i8, metadata, metadata)
@@ -736,7 +736,7 @@ define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.uitofp.f64.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.uitofp.f64.i8(i8, metadata, metadata)
@@ -780,7 +780,7 @@ define double @fcvt_d_w_i16(i16 signext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.sitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.sitofp.f64.i16(i16, metadata, metadata)
@@ -824,13 +824,13 @@ define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call double @llvm.experimental.constrained.uitofp.f64.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret double %1
 }
 declare double @llvm.experimental.constrained.uitofp.f64.i16(i16, metadata, metadata)
 
 ; Make sure we select W version of addi on RV64.
-define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
+define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, a0, 1
@@ -899,13 +899,13 @@ define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %3 = add i32 %0, 1
-  %4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %4 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
   store double %4, ptr %1, align 8
   ret i32 %3
 }
 
 ; Make sure we select W version of addi on RV64.
-define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
+define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
 ; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
 ; RV32IFD:       # %bb.0:
 ; RV32IFD-NEXT:    addi a0, a0, 1
@@ -972,7 +972,7 @@ define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %3 = add i32 %0, 1
-  %4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %4 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
   store double %4, ptr %1, align 8
   ret i32 %3
 }

diff  --git a/llvm/test/CodeGen/RISCV/float-convert-strict.ll b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
index 38fe0b911005fe..6168ade0839fb1 100644
--- a/llvm/test/CodeGen/RISCV/float-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/float-convert-strict.ll
@@ -48,7 +48,7 @@ define i32 @fcvt_w_s(float %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
@@ -81,14 +81,14 @@ define i32 @fcvt_wu_s(float %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
 
 ; Test where the fptoui has multiple uses, one of which causes a sext to be
 ; inserted on RV64.
-define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
+define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind strictfp {
 ; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
 ; CHECKIF:       # %bb.0:
 ; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
@@ -124,7 +124,7 @@ define i32 @fcvt_wu_s_multiple_use(float %x, ptr %y) nounwind {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") strictfp
+  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict")
   %b = icmp eq i32 %a, 0
   %c = select i1 %b, i32 1, i32 %a
   ret i32 %c
@@ -159,7 +159,7 @@ define float @fcvt_s_w(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
@@ -197,7 +197,7 @@ define float @fcvt_s_w_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %a = load i32, ptr %p
-  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 
@@ -230,7 +230,7 @@ define float @fcvt_s_wu(i32 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata)
@@ -280,7 +280,7 @@ define float @fcvt_s_wu_load(ptr %p) nounwind strictfp {
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
   %a = load i32, ptr %p
-  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 
@@ -330,7 +330,7 @@ define i64 @fcvt_l_s(float %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict")
   ret i64 %1
 }
 declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
@@ -381,7 +381,7 @@ define i64 @fcvt_lu_s(float %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict")
   ret i64 %1
 }
 declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
@@ -432,7 +432,7 @@ define float @fcvt_s_l(i64 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
@@ -483,7 +483,7 @@ define float @fcvt_s_lu(i64 %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
@@ -516,7 +516,7 @@ define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)
@@ -582,7 +582,7 @@ define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)
@@ -615,13 +615,13 @@ define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
 ; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
 ; RV64I-NEXT:    addi sp, sp, 16
 ; RV64I-NEXT:    ret
-  %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret float %1
 }
 declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)
 
 ; Make sure we select W version of addi on RV64.
-define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
+define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_w_demanded_bits:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi a0, a0, 1
@@ -688,13 +688,13 @@ define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %3 = add i32 %0, 1
-  %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
   store float %4, ptr %1, align 4
   ret i32 %3
 }
 
 ; Make sure we select W version of addi on RV64.
-define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
+define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind strictfp {
 ; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
 ; RV32IF:       # %bb.0:
 ; RV32IF-NEXT:    addi a0, a0, 1
@@ -759,7 +759,7 @@ define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, ptr %1) nounwind {
 ; RV64I-NEXT:    addi sp, sp, 32
 ; RV64I-NEXT:    ret
   %3 = add i32 %0, 1
-  %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
   store float %4, ptr %1, align 4
   ret i32 %3
 }

diff  --git a/llvm/test/CodeGen/RISCV/half-convert-strict.ll b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
index d994c9998ee77b..20a178b32b7238 100644
--- a/llvm/test/CodeGen/RISCV/half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/half-convert-strict.ll
@@ -128,7 +128,7 @@ define i16 @fcvt_si_h(half %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.l.s a0, a0, rtz
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i16 @llvm.experimental.constrained.fptosi.i16.f16(half %a, metadata !"fpexcept.strict")
   ret i16 %1
 }
 declare i16 @llvm.experimental.constrained.fptosi.i16.f16(half, metadata)
@@ -209,7 +209,7 @@ define i16 @fcvt_ui_h(half %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.lu.s a0, a0, rtz
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i16 @llvm.experimental.constrained.fptoui.i16.f16(half %a, metadata !"fpexcept.strict")
   ret i16 %1
 }
 declare i16 @llvm.experimental.constrained.fptoui.i16.f16(half, metadata)
@@ -280,7 +280,7 @@ define i32 @fcvt_w_h(half %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.w.s a0, a0, rtz
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
@@ -351,7 +351,7 @@ define i32 @fcvt_wu_h(half %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.wu.s a0, a0, rtz
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
@@ -359,7 +359,7 @@ declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
 ; Test where the fptoui has multiple uses, one of which causes a sext to be
 ; inserted on RV64.
 ; FIXME: We should not have an fcvt.wu.h and an fcvt.lu.h.
-define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) {
+define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) strictfp {
 ; CHECKIZFH-LABEL: fcvt_wu_h_multiple_use:
 ; CHECKIZFH:       # %bb.0:
 ; CHECKIZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -449,7 +449,7 @@ define i32 @fcvt_wu_h_multiple_use(half %x, ptr %y) {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    seqz a1, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    add a0, a0, a1
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict") strictfp
+  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %x, metadata !"fpexcept.strict")
   %b = icmp eq i32 %a, 0
   %c = select i1 %b, i32 1, i32 %a
   ret i32 %c
@@ -556,7 +556,7 @@ define i64 @fcvt_l_h(half %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.l.s a0, a0, rtz
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f16(half %a, metadata !"fpexcept.strict")
   ret i64 %1
 }
 declare i64 @llvm.experimental.constrained.fptosi.i64.f16(half, metadata)
@@ -662,7 +662,7 @@ define i64 @fcvt_lu_h(half %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.lu.s a0, a0, rtz
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f16(half %a, metadata !"fpexcept.strict")
   ret i64 %1
 }
 declare i64 @llvm.experimental.constrained.fptoui.i64.f16(half, metadata)
@@ -771,7 +771,7 @@ define half @fcvt_h_si(i16 %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.sitofp.f16.i16(i16, metadata, metadata)
@@ -842,7 +842,7 @@ define half @fcvt_h_si_signext(i16 signext %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
@@ -950,7 +950,7 @@ define half @fcvt_h_ui(i16 %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.uitofp.f16.i16(i16, metadata, metadata)
@@ -1021,7 +1021,7 @@ define half @fcvt_h_ui_zeroext(i16 zeroext %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
@@ -1094,7 +1094,7 @@ define half @fcvt_h_w(i32 %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.sitofp.f16.i32(i32, metadata, metadata)
@@ -1178,7 +1178,7 @@ define half @fcvt_h_w_load(ptr %p) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
   %a = load i32, ptr %p
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
@@ -1254,7 +1254,7 @@ define half @fcvt_h_wu(i32 %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.uitofp.f16.i32(i32, metadata, metadata)
@@ -1350,7 +1350,7 @@ define half @fcvt_h_wu_load(ptr %p) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
   %a = load i32, ptr %p
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
@@ -1455,7 +1455,7 @@ define half @fcvt_h_l(i64 %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.sitofp.f16.i64(i64, metadata, metadata)
@@ -1561,7 +1561,7 @@ define half @fcvt_h_lu(i64 %a) nounwind strictfp {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.uitofp.f16.i64(i64, metadata, metadata)
@@ -1963,7 +1963,7 @@ define double @fcvt_d_h(half %a) nounwind strictfp {
 declare double @llvm.experimental.constrained.fpext.f64.f16(half, metadata)
 
 ; Make sure we select W version of addi on RV64.
-define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) {
+define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) strictfp {
 ; RV32IZFH-LABEL: fcvt_h_w_demanded_bits:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi a0, a0, 1
@@ -2072,13 +2072,13 @@ define signext i32 @fcvt_h_w_demanded_bits(i32 signext %0, ptr %1) {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    sh a2, 0(a1)
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
   %3 = add i32 %0, 1
-  %4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %4 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
   store half %4, ptr %1, align 2
   ret i32 %3
 }
 
 ; Make sure we select W version of addi on RV64.
-define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) {
+define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) strictfp {
 ; RV32IZFH-LABEL: fcvt_h_wu_demanded_bits:
 ; RV32IZFH:       # %bb.0:
 ; RV32IZFH-NEXT:    addi a0, a0, 1
@@ -2189,7 +2189,7 @@ define signext i32 @fcvt_h_wu_demanded_bits(i32 signext %0, ptr %1) {
 ; CHECK64-IZDINXZHINXMIN-NEXT:    sh a2, 0(a1)
 ; CHECK64-IZDINXZHINXMIN-NEXT:    ret
   %3 = add i32 %0, 1
-  %4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %4 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict")
   store half %4, ptr %1, align 2
   ret i32 %3
 }

diff  --git a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll
index 131e358677d208..afc41fe86b838c 100644
--- a/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfh-half-convert-strict.ll
@@ -11,7 +11,7 @@
 ; generated for an fpto[s|u]i conversion if the result doesn't fit in the
 ; target type.
 
-define i32 @aext_fptosi(half %a) nounwind {
+define i32 @aext_fptosi(half %a) nounwind strictfp {
 ; RV64IZFH-LABEL: aext_fptosi:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -21,12 +21,12 @@ define i32 @aext_fptosi(half %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.w.h a0, a0, rtz
 ; RV64IZHINX-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
 
-define signext i32 @sext_fptosi(half %a) nounwind {
+define signext i32 @sext_fptosi(half %a) nounwind strictfp {
 ; RV64IZFH-LABEL: sext_fptosi:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -36,11 +36,11 @@ define signext i32 @sext_fptosi(half %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.w.h a0, a0, rtz
 ; RV64IZHINX-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define zeroext i32 @zext_fptosi(half %a) nounwind {
+define zeroext i32 @zext_fptosi(half %a) nounwind strictfp {
 ; RV64IZFH-LABEL: zext_fptosi:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.w.h a0, fa0, rtz
@@ -54,11 +54,11 @@ define zeroext i32 @zext_fptosi(half %a) nounwind {
 ; RV64IZHINX-NEXT:    slli a0, a0, 32
 ; RV64IZHINX-NEXT:    srli a0, a0, 32
 ; RV64IZHINX-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define i32 @aext_fptoui(half %a) nounwind {
+define i32 @aext_fptoui(half %a) nounwind strictfp {
 ; RV64IZFH-LABEL: aext_fptoui:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -68,12 +68,12 @@ define i32 @aext_fptoui(half %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.wu.h a0, a0, rtz
 ; RV64IZHINX-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
 
-define signext i32 @sext_fptoui(half %a) nounwind {
+define signext i32 @sext_fptoui(half %a) nounwind strictfp {
 ; RV64IZFH-LABEL: sext_fptoui:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.wu.h a0, fa0, rtz
@@ -83,11 +83,11 @@ define signext i32 @sext_fptoui(half %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.wu.h a0, a0, rtz
 ; RV64IZHINX-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define zeroext i32 @zext_fptoui(half %a) nounwind {
+define zeroext i32 @zext_fptoui(half %a) nounwind strictfp {
 ; RV64IZFH-LABEL: zext_fptoui:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.lu.h a0, fa0, rtz
@@ -97,11 +97,11 @@ define zeroext i32 @zext_fptoui(half %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.lu.h a0, a0, rtz
 ; RV64IZHINX-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define half @uitofp_aext_i32_to_f16(i32 %a) nounwind {
+define half @uitofp_aext_i32_to_f16(i32 %a) nounwind strictfp {
 ; RV64IZFH-LABEL: uitofp_aext_i32_to_f16:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.h.wu fa0, a0
@@ -111,12 +111,12 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.h.wu a0, a0
 ; RV64IZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata, metadata)
 
-define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
+define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp {
 ; RV64IZFH-LABEL: uitofp_sext_i32_to_f16:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.h.wu fa0, a0
@@ -126,11 +126,11 @@ define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.h.wu a0, a0
 ; RV64IZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
-define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
+define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind strictfp {
 ; RV64IZFH-LABEL: uitofp_zext_i32_to_f16:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.h.wu fa0, a0
@@ -140,11 +140,11 @@ define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.h.wu a0, a0
 ; RV64IZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
-define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
+define half @sitofp_aext_i32_to_f16(i32 %a) nounwind strictfp {
 ; RV64IZFH-LABEL: sitofp_aext_i32_to_f16:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.h.w fa0, a0
@@ -154,12 +154,12 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.h.w a0, a0
 ; RV64IZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, metadata)
 
-define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind {
+define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp {
 ; RV64IZFH-LABEL: sitofp_sext_i32_to_f16:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.h.w fa0, a0
@@ -169,11 +169,11 @@ define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.h.w a0, a0
 ; RV64IZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
-define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
+define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind strictfp {
 ; RV64IZFH-LABEL: sitofp_zext_i32_to_f16:
 ; RV64IZFH:       # %bb.0:
 ; RV64IZFH-NEXT:    fcvt.h.w fa0, a0
@@ -183,6 +183,6 @@ define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZHINX:       # %bb.0:
 ; RV64IZHINX-NEXT:    fcvt.h.w a0, a0
 ; RV64IZHINX-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }

diff  --git a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
index aae8d1db05f456..7c0b84f9c18632 100644
--- a/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zfhmin-half-convert-strict.ll
@@ -8,7 +8,7 @@
 
 ; This file exhaustively checks half<->i32 conversions.
 
-define i32 @aext_fptosi(half %a) nounwind {
+define i32 @aext_fptosi(half %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: aext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
@@ -20,12 +20,12 @@ define i32 @aext_fptosi(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.w.s a0, a0, rtz
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptosi.i32.f16(half, metadata)
 
-define signext i32 @sext_fptosi(half %a) nounwind {
+define signext i32 @sext_fptosi(half %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: sext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
@@ -37,11 +37,11 @@ define signext i32 @sext_fptosi(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.w.s a0, a0, rtz
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define zeroext i32 @zext_fptosi(half %a) nounwind {
+define zeroext i32 @zext_fptosi(half %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: zext_fptosi:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
@@ -57,11 +57,11 @@ define zeroext i32 @zext_fptosi(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    slli a0, a0, 32
 ; RV64IZHINXMIN-NEXT:    srli a0, a0, 32
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define i32 @aext_fptoui(half %a) nounwind {
+define i32 @aext_fptoui(half %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: aext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
@@ -73,12 +73,12 @@ define i32 @aext_fptoui(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.wu.s a0, a0, rtz
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 declare i32 @llvm.experimental.constrained.fptoui.i32.f16(half, metadata)
 
-define signext i32 @sext_fptoui(half %a) nounwind {
+define signext i32 @sext_fptoui(half %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: sext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
@@ -90,11 +90,11 @@ define signext i32 @sext_fptoui(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.wu.s a0, a0, rtz
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define zeroext i32 @zext_fptoui(half %a) nounwind {
+define zeroext i32 @zext_fptoui(half %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: zext_fptoui:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.h fa5, fa0
@@ -106,11 +106,11 @@ define zeroext i32 @zext_fptoui(half %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.h a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.lu.s a0, a0, rtz
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict") strictfp
+  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f16(half %a, metadata !"fpexcept.strict")
   ret i32 %1
 }
 
-define half @uitofp_aext_i32_to_f16(i32 %a) nounwind {
+define half @uitofp_aext_i32_to_f16(i32 %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: uitofp_aext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
@@ -126,12 +126,12 @@ define half @uitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata, metadata)
 
-define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
+define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: uitofp_sext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    slli a0, a0, 32
@@ -147,11 +147,11 @@ define half @uitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
-define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
+define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: uitofp_zext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.lu fa5, a0
@@ -163,11 +163,11 @@ define half @uitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.lu a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.uitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
-define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
+define half @sitofp_aext_i32_to_f16(i32 %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: sitofp_aext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
@@ -181,12 +181,12 @@ define half @sitofp_aext_i32_to_f16(i32 %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 declare half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata, metadata)
 
-define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind {
+define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: sitofp_sext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    fcvt.s.l fa5, a0
@@ -198,11 +198,11 @@ define half @sitofp_sext_i32_to_f16(i32 signext %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }
 
-define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
+define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind strictfp {
 ; RV64IZFHMIN-LABEL: sitofp_zext_i32_to_f16:
 ; RV64IZFHMIN:       # %bb.0:
 ; RV64IZFHMIN-NEXT:    sext.w a0, a0
@@ -216,6 +216,6 @@ define half @sitofp_zext_i32_to_f16(i32 zeroext %a) nounwind {
 ; RV64IZHINXMIN-NEXT:    fcvt.s.l a0, a0
 ; RV64IZHINXMIN-NEXT:    fcvt.h.s a0, a0
 ; RV64IZHINXMIN-NEXT:    ret
-  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
+  %1 = call half @llvm.experimental.constrained.sitofp.f16.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret half %1
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
index 1c7938df7b2cef..75747a6674b7b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fceil-constrained-sdnode.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) {
+define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -26,7 +26,7 @@ define <vscale x 1 x half> @ceil_nxv1f16(<vscale x 1 x half> %x) {
 }
 declare <vscale x 1 x half> @llvm.experimental.constrained.ceil.nxv1f16(<vscale x 1 x half>, metadata)
 
-define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) {
+define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -48,7 +48,7 @@ define <vscale x 2 x half> @ceil_nxv2f16(<vscale x 2 x half> %x) {
 }
 declare <vscale x 2 x half> @llvm.experimental.constrained.ceil.nxv2f16(<vscale x 2 x half>, metadata)
 
-define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) {
+define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -70,7 +70,7 @@ define <vscale x 4 x half> @ceil_nxv4f16(<vscale x 4 x half> %x) {
 }
 declare <vscale x 4 x half> @llvm.experimental.constrained.ceil.nxv4f16(<vscale x 4 x half>, metadata)
 
-define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) {
+define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -92,7 +92,7 @@ define <vscale x 8 x half> @ceil_nxv8f16(<vscale x 8 x half> %x) {
 }
 declare <vscale x 8 x half> @llvm.experimental.constrained.ceil.nxv8f16(<vscale x 8 x half>, metadata)
 
-define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) {
+define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -114,7 +114,7 @@ define <vscale x 16 x half> @ceil_nxv16f16(<vscale x 16 x half> %x) {
 }
 declare <vscale x 16 x half> @llvm.experimental.constrained.ceil.nxv16f16(<vscale x 16 x half>, metadata)
 
-define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
+define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -136,7 +136,7 @@ define <vscale x 32 x half> @ceil_nxv32f16(<vscale x 32 x half> %x) {
 }
 declare <vscale x 32 x half> @llvm.experimental.constrained.ceil.nxv32f16(<vscale x 32 x half>, metadata)
 
-define <vscale x 1 x float> @ceil_nxv1f32(<vscale x 1 x float> %x) {
+define <vscale x 1 x float> @ceil_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -158,7 +158,7 @@ define <vscale x 1 x float> @ceil_nxv1f32(<vscale x 1 x float> %x) {
 }
 declare <vscale x 1 x float> @llvm.experimental.constrained.ceil.nxv1f32(<vscale x 1 x float>, metadata)
 
-define <vscale x 2 x float> @ceil_nxv2f32(<vscale x 2 x float> %x) {
+define <vscale x 2 x float> @ceil_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -180,7 +180,7 @@ define <vscale x 2 x float> @ceil_nxv2f32(<vscale x 2 x float> %x) {
 }
 declare <vscale x 2 x float> @llvm.experimental.constrained.ceil.nxv2f32(<vscale x 2 x float>, metadata)
 
-define <vscale x 4 x float> @ceil_nxv4f32(<vscale x 4 x float> %x) {
+define <vscale x 4 x float> @ceil_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -202,7 +202,7 @@ define <vscale x 4 x float> @ceil_nxv4f32(<vscale x 4 x float> %x) {
 }
 declare <vscale x 4 x float> @llvm.experimental.constrained.ceil.nxv4f32(<vscale x 4 x float>, metadata)
 
-define <vscale x 8 x float> @ceil_nxv8f32(<vscale x 8 x float> %x) {
+define <vscale x 8 x float> @ceil_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -224,7 +224,7 @@ define <vscale x 8 x float> @ceil_nxv8f32(<vscale x 8 x float> %x) {
 }
 declare <vscale x 8 x float> @llvm.experimental.constrained.ceil.nxv8f32(<vscale x 8 x float>, metadata)
 
-define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) {
+define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -246,7 +246,7 @@ define <vscale x 16 x float> @ceil_nxv16f32(<vscale x 16 x float> %x) {
 }
 declare <vscale x 16 x float> @llvm.experimental.constrained.ceil.nxv16f32(<vscale x 16 x float>, metadata)
 
-define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) {
+define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -268,7 +268,7 @@ define <vscale x 1 x double> @ceil_nxv1f64(<vscale x 1 x double> %x) {
 }
 declare <vscale x 1 x double> @llvm.experimental.constrained.ceil.nxv1f64(<vscale x 1 x double>, metadata)
 
-define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) {
+define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -290,7 +290,7 @@ define <vscale x 2 x double> @ceil_nxv2f64(<vscale x 2 x double> %x) {
 }
 declare <vscale x 2 x double> @llvm.experimental.constrained.ceil.nxv2f64(<vscale x 2 x double>, metadata)
 
-define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) {
+define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -312,7 +312,7 @@ define <vscale x 4 x double> @ceil_nxv4f64(<vscale x 4 x double> %x) {
 }
 declare <vscale x 4 x double> @llvm.experimental.constrained.ceil.nxv4f64(<vscale x 4 x double>, metadata)
 
-define <vscale x 8 x double> @ceil_nxv8f64(<vscale x 8 x double> %x) {
+define <vscale x 8 x double> @ceil_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

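The same two-line change repeats for every element type and LMUL. For
reference, a minimal standalone sketch of the LangRef rule being enforced;
the function names here are illustrative and do not appear in the patch:

  ; Ill-formed: the body uses a constrained intrinsic but the
  ; definition lacks the strictfp attribute.
  define half @ceil_one(half %x) {
    %r = call half @llvm.experimental.constrained.ceil.f16(half %x, metadata !"fpexcept.strict")
    ret half %r
  }

  ; Well-formed: the definition carries strictfp. Since D154991 the
  ; attribute is implied at constrained-intrinsic call sites, so it no
  ; longer needs to be spelled on the call itself.
  define half @ceil_one_fixed(half %x) strictfp {
    %r = call half @llvm.experimental.constrained.ceil.f16(half %x, metadata !"fpexcept.strict")
    ret half %r
  }
  declare half @llvm.experimental.constrained.ceil.f16(half, metadata)
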
diff --git a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
index 9382fa43bb7e5c..31a94532044574 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ffloor-constrained-sdnode.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) {
+define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-LABEL: floor_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -26,7 +26,7 @@ define <vscale x 1 x half> @floor_nxv1f16(<vscale x 1 x half> %x) {
 }
 declare <vscale x 1 x half> @llvm.experimental.constrained.floor.nxv1f16(<vscale x 1 x half>, metadata)
 
-define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) {
+define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: floor_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -48,7 +48,7 @@ define <vscale x 2 x half> @floor_nxv2f16(<vscale x 2 x half> %x) {
 }
 declare <vscale x 2 x half> @llvm.experimental.constrained.floor.nxv2f16(<vscale x 2 x half>, metadata)
 
-define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) {
+define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: floor_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -70,7 +70,7 @@ define <vscale x 4 x half> @floor_nxv4f16(<vscale x 4 x half> %x) {
 }
 declare <vscale x 4 x half> @llvm.experimental.constrained.floor.nxv4f16(<vscale x 4 x half>, metadata)
 
-define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) {
+define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: floor_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -92,7 +92,7 @@ define <vscale x 8 x half> @floor_nxv8f16(<vscale x 8 x half> %x) {
 }
 declare <vscale x 8 x half> @llvm.experimental.constrained.floor.nxv8f16(<vscale x 8 x half>, metadata)
 
-define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) {
+define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: floor_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -114,7 +114,7 @@ define <vscale x 16 x half> @floor_nxv16f16(<vscale x 16 x half> %x) {
 }
 declare <vscale x 16 x half> @llvm.experimental.constrained.floor.nxv16f16(<vscale x 16 x half>, metadata)
 
-define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) {
+define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: floor_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -136,7 +136,7 @@ define <vscale x 32 x half> @floor_nxv32f16(<vscale x 32 x half> %x) {
 }
 declare <vscale x 32 x half> @llvm.experimental.constrained.floor.nxv32f16(<vscale x 32 x half>, metadata)
 
-define <vscale x 1 x float> @floor_nxv1f32(<vscale x 1 x float> %x) {
+define <vscale x 1 x float> @floor_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: floor_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -158,7 +158,7 @@ define <vscale x 1 x float> @floor_nxv1f32(<vscale x 1 x float> %x) {
 }
 declare <vscale x 1 x float> @llvm.experimental.constrained.floor.nxv1f32(<vscale x 1 x float>, metadata)
 
-define <vscale x 2 x float> @floor_nxv2f32(<vscale x 2 x float> %x) {
+define <vscale x 2 x float> @floor_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: floor_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -180,7 +180,7 @@ define <vscale x 2 x float> @floor_nxv2f32(<vscale x 2 x float> %x) {
 }
 declare <vscale x 2 x float> @llvm.experimental.constrained.floor.nxv2f32(<vscale x 2 x float>, metadata)
 
-define <vscale x 4 x float> @floor_nxv4f32(<vscale x 4 x float> %x) {
+define <vscale x 4 x float> @floor_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: floor_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -202,7 +202,7 @@ define <vscale x 4 x float> @floor_nxv4f32(<vscale x 4 x float> %x) {
 }
 declare <vscale x 4 x float> @llvm.experimental.constrained.floor.nxv4f32(<vscale x 4 x float>, metadata)
 
-define <vscale x 8 x float> @floor_nxv8f32(<vscale x 8 x float> %x) {
+define <vscale x 8 x float> @floor_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: floor_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -224,7 +224,7 @@ define <vscale x 8 x float> @floor_nxv8f32(<vscale x 8 x float> %x) {
 }
 declare <vscale x 8 x float> @llvm.experimental.constrained.floor.nxv8f32(<vscale x 8 x float>, metadata)
 
-define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) {
+define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: floor_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -246,7 +246,7 @@ define <vscale x 16 x float> @floor_nxv16f32(<vscale x 16 x float> %x) {
 }
 declare <vscale x 16 x float> @llvm.experimental.constrained.floor.nxv16f32(<vscale x 16 x float>, metadata)
 
-define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) {
+define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: floor_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -268,7 +268,7 @@ define <vscale x 1 x double> @floor_nxv1f64(<vscale x 1 x double> %x) {
 }
 declare <vscale x 1 x double> @llvm.experimental.constrained.floor.nxv1f64(<vscale x 1 x double>, metadata)
 
-define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) {
+define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: floor_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -290,7 +290,7 @@ define <vscale x 2 x double> @floor_nxv2f64(<vscale x 2 x double> %x) {
 }
 declare <vscale x 2 x double> @llvm.experimental.constrained.floor.nxv2f64(<vscale x 2 x double>, metadata)
 
-define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) {
+define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: floor_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -312,7 +312,7 @@ define <vscale x 4 x double> @floor_nxv4f64(<vscale x 4 x double> %x) {
 }
 declare <vscale x 4 x double> @llvm.experimental.constrained.floor.nxv4f64(<vscale x 4 x double>, metadata)
 
-define <vscale x 8 x double> @floor_nxv8f64(<vscale x 8 x double> %x) {
+define <vscale x 8 x double> @floor_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: floor_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
index 03a04b6da4e1f6..1e93a73ede5d65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fceil-constrained-sdnode.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-define <1 x half> @ceil_v1f16(<1 x half> %x) {
+define <1 x half> @ceil_v1f16(<1 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -26,7 +26,7 @@ define <1 x half> @ceil_v1f16(<1 x half> %x) {
 }
 declare <1 x half> @llvm.experimental.constrained.ceil.v1f16(<1 x half>, metadata)
 
-define <2 x half> @ceil_v2f16(<2 x half> %x) {
+define <2 x half> @ceil_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -48,7 +48,7 @@ define <2 x half> @ceil_v2f16(<2 x half> %x) {
 }
 declare <2 x half> @llvm.experimental.constrained.ceil.v2f16(<2 x half>, metadata)
 
-define <4 x half> @ceil_v4f16(<4 x half> %x) {
+define <4 x half> @ceil_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -70,7 +70,7 @@ define <4 x half> @ceil_v4f16(<4 x half> %x) {
 }
 declare <4 x half> @llvm.experimental.constrained.ceil.v4f16(<4 x half>, metadata)
 
-define <8 x half> @ceil_v8f16(<8 x half> %x) {
+define <8 x half> @ceil_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -92,7 +92,7 @@ define <8 x half> @ceil_v8f16(<8 x half> %x) {
 }
 declare <8 x half> @llvm.experimental.constrained.ceil.v8f16(<8 x half>, metadata)
 
-define <16 x half> @ceil_v16f16(<16 x half> %x) {
+define <16 x half> @ceil_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -114,7 +114,7 @@ define <16 x half> @ceil_v16f16(<16 x half> %x) {
 }
 declare <16 x half> @llvm.experimental.constrained.ceil.v16f16(<16 x half>, metadata)
 
-define <32 x half> @ceil_v32f16(<32 x half> %x) {
+define <32 x half> @ceil_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: ceil_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -137,7 +137,7 @@ define <32 x half> @ceil_v32f16(<32 x half> %x) {
 }
 declare <32 x half> @llvm.experimental.constrained.ceil.v32f16(<32 x half>, metadata)
 
-define <1 x float> @ceil_v1f32(<1 x float> %x) {
+define <1 x float> @ceil_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -159,7 +159,7 @@ define <1 x float> @ceil_v1f32(<1 x float> %x) {
 }
 declare <1 x float> @llvm.experimental.constrained.ceil.v1f32(<1 x float>, metadata)
 
-define <2 x float> @ceil_v2f32(<2 x float> %x) {
+define <2 x float> @ceil_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -181,7 +181,7 @@ define <2 x float> @ceil_v2f32(<2 x float> %x) {
 }
 declare <2 x float> @llvm.experimental.constrained.ceil.v2f32(<2 x float>, metadata)
 
-define <4 x float> @ceil_v4f32(<4 x float> %x) {
+define <4 x float> @ceil_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -203,7 +203,7 @@ define <4 x float> @ceil_v4f32(<4 x float> %x) {
 }
 declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
 
-define <8 x float> @ceil_v8f32(<8 x float> %x) {
+define <8 x float> @ceil_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -225,7 +225,7 @@ define <8 x float> @ceil_v8f32(<8 x float> %x) {
 }
 declare <8 x float> @llvm.experimental.constrained.ceil.v8f32(<8 x float>, metadata)
 
-define <16 x float> @ceil_v16f32(<16 x float> %x) {
+define <16 x float> @ceil_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: ceil_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -247,7 +247,7 @@ define <16 x float> @ceil_v16f32(<16 x float> %x) {
 }
 declare <16 x float> @llvm.experimental.constrained.ceil.v16f32(<16 x float>, metadata)
 
-define <1 x double> @ceil_v1f64(<1 x double> %x) {
+define <1 x double> @ceil_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -269,7 +269,7 @@ define <1 x double> @ceil_v1f64(<1 x double> %x) {
 }
 declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata)
 
-define <2 x double> @ceil_v2f64(<2 x double> %x) {
+define <2 x double> @ceil_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -291,7 +291,7 @@ define <2 x double> @ceil_v2f64(<2 x double> %x) {
 }
 declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
 
-define <4 x double> @ceil_v4f64(<4 x double> %x) {
+define <4 x double> @ceil_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -313,7 +313,7 @@ define <4 x double> @ceil_v4f64(<4 x double> %x) {
 }
 declare <4 x double> @llvm.experimental.constrained.ceil.v4f64(<4 x double>, metadata)
 
-define <8 x double> @ceil_v8f64(<8 x double> %x) {
+define <8 x double> @ceil_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: ceil_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
index 99169337c86a7a..53018939fc6eb4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ffloor-constrained-sdnode.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-define <1 x half> @floor_v1f16(<1 x half> %x) {
+define <1 x half> @floor_v1f16(<1 x half> %x) strictfp {
 ; CHECK-LABEL: floor_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -26,7 +26,7 @@ define <1 x half> @floor_v1f16(<1 x half> %x) {
 }
 declare <1 x half> @llvm.experimental.constrained.floor.v1f16(<1 x half>, metadata)
 
-define <2 x half> @floor_v2f16(<2 x half> %x) {
+define <2 x half> @floor_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: floor_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -48,7 +48,7 @@ define <2 x half> @floor_v2f16(<2 x half> %x) {
 }
 declare <2 x half> @llvm.experimental.constrained.floor.v2f16(<2 x half>, metadata)
 
-define <4 x half> @floor_v4f16(<4 x half> %x) {
+define <4 x half> @floor_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: floor_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -70,7 +70,7 @@ define <4 x half> @floor_v4f16(<4 x half> %x) {
 }
 declare <4 x half> @llvm.experimental.constrained.floor.v4f16(<4 x half>, metadata)
 
-define <8 x half> @floor_v8f16(<8 x half> %x) {
+define <8 x half> @floor_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: floor_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -92,7 +92,7 @@ define <8 x half> @floor_v8f16(<8 x half> %x) {
 }
 declare <8 x half> @llvm.experimental.constrained.floor.v8f16(<8 x half>, metadata)
 
-define <16 x half> @floor_v16f16(<16 x half> %x) {
+define <16 x half> @floor_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: floor_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -114,7 +114,7 @@ define <16 x half> @floor_v16f16(<16 x half> %x) {
 }
 declare <16 x half> @llvm.experimental.constrained.floor.v16f16(<16 x half>, metadata)
 
-define <32 x half> @floor_v32f16(<32 x half> %x) {
+define <32 x half> @floor_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: floor_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -137,7 +137,7 @@ define <32 x half> @floor_v32f16(<32 x half> %x) {
 }
 declare <32 x half> @llvm.experimental.constrained.floor.v32f16(<32 x half>, metadata)
 
-define <1 x float> @floor_v1f32(<1 x float> %x) {
+define <1 x float> @floor_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: floor_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -159,7 +159,7 @@ define <1 x float> @floor_v1f32(<1 x float> %x) {
 }
 declare <1 x float> @llvm.experimental.constrained.floor.v1f32(<1 x float>, metadata)
 
-define <2 x float> @floor_v2f32(<2 x float> %x) {
+define <2 x float> @floor_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: floor_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -181,7 +181,7 @@ define <2 x float> @floor_v2f32(<2 x float> %x) {
 }
 declare <2 x float> @llvm.experimental.constrained.floor.v2f32(<2 x float>, metadata)
 
-define <4 x float> @floor_v4f32(<4 x float> %x) {
+define <4 x float> @floor_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: floor_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -203,7 +203,7 @@ define <4 x float> @floor_v4f32(<4 x float> %x) {
 }
 declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
 
-define <8 x float> @floor_v8f32(<8 x float> %x) {
+define <8 x float> @floor_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: floor_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -225,7 +225,7 @@ define <8 x float> @floor_v8f32(<8 x float> %x) {
 }
 declare <8 x float> @llvm.experimental.constrained.floor.v8f32(<8 x float>, metadata)
 
-define <16 x float> @floor_v16f32(<16 x float> %x) {
+define <16 x float> @floor_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: floor_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -247,7 +247,7 @@ define <16 x float> @floor_v16f32(<16 x float> %x) {
 }
 declare <16 x float> @llvm.experimental.constrained.floor.v16f32(<16 x float>, metadata)
 
-define <1 x double> @floor_v1f64(<1 x double> %x) {
+define <1 x double> @floor_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: floor_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -269,7 +269,7 @@ define <1 x double> @floor_v1f64(<1 x double> %x) {
 }
 declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
 
-define <2 x double> @floor_v2f64(<2 x double> %x) {
+define <2 x double> @floor_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: floor_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -291,7 +291,7 @@ define <2 x double> @floor_v2f64(<2 x double> %x) {
 }
 declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
 
-define <4 x double> @floor_v4f64(<4 x double> %x) {
+define <4 x double> @floor_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: floor_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -313,7 +313,7 @@ define <4 x double> @floor_v4f64(<4 x double> %x) {
 }
 declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata)
 
-define <8 x double> @floor_v8f64(<8 x double> %x) {
+define <8 x double> @floor_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: floor_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
index 441271d02042b4..1b50214bbf164d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fnearbyint-constrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata)
 
-define <2 x half> @nearbyint_v2f16(<2 x half> %v) {
+define <2 x half> @nearbyint_v2f16(<2 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <2 x half> @nearbyint_v2f16(<2 x half> %v) {
 
 declare <4 x half> @llvm.experimental.constrained.nearbyint.v4f16(<4 x half>, metadata, metadata)
 
-define <4 x half> @nearbyint_v4f16(<4 x half> %v) {
+define <4 x half> @nearbyint_v4f16(<4 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -52,7 +52,7 @@ define <4 x half> @nearbyint_v4f16(<4 x half> %v) {
 
 declare <8 x half> @llvm.experimental.constrained.nearbyint.v8f16(<8 x half>, metadata, metadata)
 
-define <8 x half> @nearbyint_v8f16(<8 x half> %v) {
+define <8 x half> @nearbyint_v8f16(<8 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -75,7 +75,7 @@ define <8 x half> @nearbyint_v8f16(<8 x half> %v) {
 
 declare <16 x half> @llvm.experimental.constrained.nearbyint.v16f16(<16 x half>, metadata, metadata)
 
-define <16 x half> @nearbyint_v16f16(<16 x half> %v) {
+define <16 x half> @nearbyint_v16f16(<16 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -98,7 +98,7 @@ define <16 x half> @nearbyint_v16f16(<16 x half> %v) {
 
 declare <32 x half> @llvm.experimental.constrained.nearbyint.v32f16(<32 x half>, metadata, metadata)
 
-define <32 x half> @nearbyint_v32f16(<32 x half> %v) {
+define <32 x half> @nearbyint_v32f16(<32 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -122,7 +122,7 @@ define <32 x half> @nearbyint_v32f16(<32 x half> %v) {
 
 declare <2 x float> @llvm.experimental.constrained.nearbyint.v2f32(<2 x float>, metadata, metadata)
 
-define <2 x float> @nearbyint_v2f32(<2 x float> %v) {
+define <2 x float> @nearbyint_v2f32(<2 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -145,7 +145,7 @@ define <2 x float> @nearbyint_v2f32(<2 x float> %v) {
 
 declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
 
-define <4 x float> @nearbyint_v4f32(<4 x float> %v) {
+define <4 x float> @nearbyint_v4f32(<4 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -168,7 +168,7 @@ define <4 x float> @nearbyint_v4f32(<4 x float> %v) {
 
 declare <8 x float> @llvm.experimental.constrained.nearbyint.v8f32(<8 x float>, metadata, metadata)
 
-define <8 x float> @nearbyint_v8f32(<8 x float> %v) {
+define <8 x float> @nearbyint_v8f32(<8 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -191,7 +191,7 @@ define <8 x float> @nearbyint_v8f32(<8 x float> %v) {
 
 declare <16 x float> @llvm.experimental.constrained.nearbyint.v16f32(<16 x float>, metadata, metadata)
 
-define <16 x float> @nearbyint_v16f32(<16 x float> %v) {
+define <16 x float> @nearbyint_v16f32(<16 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -214,7 +214,7 @@ define <16 x float> @nearbyint_v16f32(<16 x float> %v) {
 
 declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
 
-define <2 x double> @nearbyint_v2f64(<2 x double> %v) {
+define <2 x double> @nearbyint_v2f64(<2 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -237,7 +237,7 @@ define <2 x double> @nearbyint_v2f64(<2 x double> %v) {
 
 declare <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(<4 x double>, metadata, metadata)
 
-define <4 x double> @nearbyint_v4f64(<4 x double> %v) {
+define <4 x double> @nearbyint_v4f64(<4 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -260,7 +260,7 @@ define <4 x double> @nearbyint_v4f64(<4 x double> %v) {
 
 declare <8 x double> @llvm.experimental.constrained.nearbyint.v8f64(<8 x double>, metadata, metadata)
 
-define <8 x double> @nearbyint_v8f64(<8 x double> %v) {
+define <8 x double> @nearbyint_v8f64(<8 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

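Note the declarations in this file take two metadata operands: unlike
ceil and floor, constrained nearbyint carries both a rounding-mode and an
exception-behavior argument. A minimal sketch, with an illustrative
function name:

  define <2 x half> @nearbyint_sketch(<2 x half> %v) strictfp {
    %r = call <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(
               <2 x half> %v,
               metadata !"round.dynamic",   ; rounding mode read from the FP environment
               metadata !"fpexcept.strict") ; FP exception side effects preserved
    ret <2 x half> %r
  }
  declare <2 x half> @llvm.experimental.constrained.nearbyint.v2f16(<2 x half>, metadata, metadata)
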
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
index 866401fba662c6..f189354237ee3a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fround-costrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 ; This file tests the code generation for `llvm.experimental.constrained.round.*` on fixed vector types.
 
-define <1 x half> @round_v1f16(<1 x half> %x) {
+define <1 x half> @round_v1f16(<1 x half> %x) strictfp {
 ; CHECK-LABEL: round_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -28,7 +28,7 @@ define <1 x half> @round_v1f16(<1 x half> %x) {
 }
 declare <1 x half> @llvm.experimental.constrained.round.v1f16(<1 x half>, metadata)
 
-define <2 x half> @round_v2f16(<2 x half> %x) {
+define <2 x half> @round_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: round_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -50,7 +50,7 @@ define <2 x half> @round_v2f16(<2 x half> %x) {
 }
 declare <2 x half> @llvm.experimental.constrained.round.v2f16(<2 x half>, metadata)
 
-define <4 x half> @round_v4f16(<4 x half> %x) {
+define <4 x half> @round_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: round_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -72,7 +72,7 @@ define <4 x half> @round_v4f16(<4 x half> %x) {
 }
 declare <4 x half> @llvm.experimental.constrained.round.v4f16(<4 x half>, metadata)
 
-define <8 x half> @round_v8f16(<8 x half> %x) {
+define <8 x half> @round_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: round_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -94,7 +94,7 @@ define <8 x half> @round_v8f16(<8 x half> %x) {
 }
 declare <8 x half> @llvm.experimental.constrained.round.v8f16(<8 x half>, metadata)
 
-define <16 x half> @round_v16f16(<16 x half> %x) {
+define <16 x half> @round_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: round_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -116,7 +116,7 @@ define <16 x half> @round_v16f16(<16 x half> %x) {
 }
 declare <16 x half> @llvm.experimental.constrained.round.v16f16(<16 x half>, metadata)
 
-define <32 x half> @round_v32f16(<32 x half> %x) {
+define <32 x half> @round_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: round_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -139,7 +139,7 @@ define <32 x half> @round_v32f16(<32 x half> %x) {
 }
 declare <32 x half> @llvm.experimental.constrained.round.v32f16(<32 x half>, metadata)
 
-define <1 x float> @round_v1f32(<1 x float> %x) {
+define <1 x float> @round_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: round_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -161,7 +161,7 @@ define <1 x float> @round_v1f32(<1 x float> %x) {
 }
 declare <1 x float> @llvm.experimental.constrained.round.v1f32(<1 x float>, metadata)
 
-define <2 x float> @round_v2f32(<2 x float> %x) {
+define <2 x float> @round_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: round_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -183,7 +183,7 @@ define <2 x float> @round_v2f32(<2 x float> %x) {
 }
 declare <2 x float> @llvm.experimental.constrained.round.v2f32(<2 x float>, metadata)
 
-define <4 x float> @round_v4f32(<4 x float> %x) {
+define <4 x float> @round_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: round_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -205,7 +205,7 @@ define <4 x float> @round_v4f32(<4 x float> %x) {
 }
 declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
 
-define <8 x float> @round_v8f32(<8 x float> %x) {
+define <8 x float> @round_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: round_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -227,7 +227,7 @@ define <8 x float> @round_v8f32(<8 x float> %x) {
 }
 declare <8 x float> @llvm.experimental.constrained.round.v8f32(<8 x float>, metadata)
 
-define <16 x float> @round_v16f32(<16 x float> %x) {
+define <16 x float> @round_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: round_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -249,7 +249,7 @@ define <16 x float> @round_v16f32(<16 x float> %x) {
 }
 declare <16 x float> @llvm.experimental.constrained.round.v16f32(<16 x float>, metadata)
 
-define <1 x double> @round_v1f64(<1 x double> %x) {
+define <1 x double> @round_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: round_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -271,7 +271,7 @@ define <1 x double> @round_v1f64(<1 x double> %x) {
 }
 declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
 
-define <2 x double> @round_v2f64(<2 x double> %x) {
+define <2 x double> @round_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: round_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -293,7 +293,7 @@ define <2 x double> @round_v2f64(<2 x double> %x) {
 }
 declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
 
-define <4 x double> @round_v4f64(<4 x double> %x) {
+define <4 x double> @round_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: round_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -315,7 +315,7 @@ define <4 x double> @round_v4f64(<4 x double> %x) {
 }
 declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata)
 
-define <8 x double> @round_v8f64(<8 x double> %x) {
+define <8 x double> @round_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: round_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
index b8ab0c71f98a9c..11920c7c31c981 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-froundeven-constrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 ; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on fixed vector types.
 
-define <1 x half> @roundeven_v1f16(<1 x half> %x) {
+define <1 x half> @roundeven_v1f16(<1 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -28,7 +28,7 @@ define <1 x half> @roundeven_v1f16(<1 x half> %x) {
 }
 declare <1 x half> @llvm.experimental.constrained.roundeven.v1f16(<1 x half>, metadata)
 
-define <2 x half> @roundeven_v2f16(<2 x half> %x) {
+define <2 x half> @roundeven_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -50,7 +50,7 @@ define <2 x half> @roundeven_v2f16(<2 x half> %x) {
 }
 declare <2 x half> @llvm.experimental.constrained.roundeven.v2f16(<2 x half>, metadata)
 
-define <4 x half> @roundeven_v4f16(<4 x half> %x) {
+define <4 x half> @roundeven_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -72,7 +72,7 @@ define <4 x half> @roundeven_v4f16(<4 x half> %x) {
 }
 declare <4 x half> @llvm.experimental.constrained.roundeven.v4f16(<4 x half>, metadata)
 
-define <8 x half> @roundeven_v8f16(<8 x half> %x) {
+define <8 x half> @roundeven_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -94,7 +94,7 @@ define <8 x half> @roundeven_v8f16(<8 x half> %x) {
 }
 declare <8 x half> @llvm.experimental.constrained.roundeven.v8f16(<8 x half>, metadata)
 
-define <16 x half> @roundeven_v16f16(<16 x half> %x) {
+define <16 x half> @roundeven_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -116,7 +116,7 @@ define <16 x half> @roundeven_v16f16(<16 x half> %x) {
 }
 declare <16 x half> @llvm.experimental.constrained.roundeven.v16f16(<16 x half>, metadata)
 
-define <32 x half> @roundeven_v32f16(<32 x half> %x) {
+define <32 x half> @roundeven_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -139,7 +139,7 @@ define <32 x half> @roundeven_v32f16(<32 x half> %x) {
 }
 declare <32 x half> @llvm.experimental.constrained.roundeven.v32f16(<32 x half>, metadata)
 
-define <1 x float> @roundeven_v1f32(<1 x float> %x) {
+define <1 x float> @roundeven_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -161,7 +161,7 @@ define <1 x float> @roundeven_v1f32(<1 x float> %x) {
 }
 declare <1 x float> @llvm.experimental.constrained.roundeven.v1f32(<1 x float>, metadata)
 
-define <2 x float> @roundeven_v2f32(<2 x float> %x) {
+define <2 x float> @roundeven_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -183,7 +183,7 @@ define <2 x float> @roundeven_v2f32(<2 x float> %x) {
 }
 declare <2 x float> @llvm.experimental.constrained.roundeven.v2f32(<2 x float>, metadata)
 
-define <4 x float> @roundeven_v4f32(<4 x float> %x) {
+define <4 x float> @roundeven_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -205,7 +205,7 @@ define <4 x float> @roundeven_v4f32(<4 x float> %x) {
 }
 declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
 
-define <8 x float> @roundeven_v8f32(<8 x float> %x) {
+define <8 x float> @roundeven_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -227,7 +227,7 @@ define <8 x float> @roundeven_v8f32(<8 x float> %x) {
 }
 declare <8 x float> @llvm.experimental.constrained.roundeven.v8f32(<8 x float>, metadata)
 
-define <16 x float> @roundeven_v16f32(<16 x float> %x) {
+define <16 x float> @roundeven_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -249,7 +249,7 @@ define <16 x float> @roundeven_v16f32(<16 x float> %x) {
 }
 declare <16 x float> @llvm.experimental.constrained.roundeven.v16f32(<16 x float>, metadata)
 
-define <1 x double> @roundeven_v1f64(<1 x double> %x) {
+define <1 x double> @roundeven_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -271,7 +271,7 @@ define <1 x double> @roundeven_v1f64(<1 x double> %x) {
 }
 declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
 
-define <2 x double> @roundeven_v2f64(<2 x double> %x) {
+define <2 x double> @roundeven_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -293,7 +293,7 @@ define <2 x double> @roundeven_v2f64(<2 x double> %x) {
 }
 declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
 
-define <4 x double> @roundeven_v4f64(<4 x double> %x) {
+define <4 x double> @roundeven_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -315,7 +315,7 @@ define <4 x double> @roundeven_v4f64(<4 x double> %x) {
 }
 declare <4 x double> @llvm.experimental.constrained.roundeven.v4f64(<4 x double>, metadata)
 
-define <8 x double> @roundeven_v8f64(<8 x double> %x) {
+define <8 x double> @roundeven_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
index c09c19f8e8b1d5..f16581444afca5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ftrunc-constrained-sdnode.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-define <1 x half> @trunc_v1f16(<1 x half> %x) {
+define <1 x half> @trunc_v1f16(<1 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -24,7 +24,7 @@ define <1 x half> @trunc_v1f16(<1 x half> %x) {
 }
 declare <1 x half> @llvm.experimental.constrained.trunc.v1f16(<1 x half>, metadata)
 
-define <2 x half> @trunc_v2f16(<2 x half> %x) {
+define <2 x half> @trunc_v2f16(<2 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -44,7 +44,7 @@ define <2 x half> @trunc_v2f16(<2 x half> %x) {
 }
 declare <2 x half> @llvm.experimental.constrained.trunc.v2f16(<2 x half>, metadata)
 
-define <4 x half> @trunc_v4f16(<4 x half> %x) {
+define <4 x half> @trunc_v4f16(<4 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -64,7 +64,7 @@ define <4 x half> @trunc_v4f16(<4 x half> %x) {
 }
 declare <4 x half> @llvm.experimental.constrained.trunc.v4f16(<4 x half>, metadata)
 
-define <8 x half> @trunc_v8f16(<8 x half> %x) {
+define <8 x half> @trunc_v8f16(<8 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -84,7 +84,7 @@ define <8 x half> @trunc_v8f16(<8 x half> %x) {
 }
 declare <8 x half> @llvm.experimental.constrained.trunc.v8f16(<8 x half>, metadata)
 
-define <16 x half> @trunc_v16f16(<16 x half> %x) {
+define <16 x half> @trunc_v16f16(<16 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -104,7 +104,7 @@ define <16 x half> @trunc_v16f16(<16 x half> %x) {
 }
 declare <16 x half> @llvm.experimental.constrained.trunc.v16f16(<16 x half>, metadata)
 
-define <32 x half> @trunc_v32f16(<32 x half> %x) {
+define <32 x half> @trunc_v32f16(<32 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -125,7 +125,7 @@ define <32 x half> @trunc_v32f16(<32 x half> %x) {
 }
 declare <32 x half> @llvm.experimental.constrained.trunc.v32f16(<32 x half>, metadata)
 
-define <1 x float> @trunc_v1f32(<1 x float> %x) {
+define <1 x float> @trunc_v1f32(<1 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -145,7 +145,7 @@ define <1 x float> @trunc_v1f32(<1 x float> %x) {
 }
 declare <1 x float> @llvm.experimental.constrained.trunc.v1f32(<1 x float>, metadata)
 
-define <2 x float> @trunc_v2f32(<2 x float> %x) {
+define <2 x float> @trunc_v2f32(<2 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -165,7 +165,7 @@ define <2 x float> @trunc_v2f32(<2 x float> %x) {
 }
 declare <2 x float> @llvm.experimental.constrained.trunc.v2f32(<2 x float>, metadata)
 
-define <4 x float> @trunc_v4f32(<4 x float> %x) {
+define <4 x float> @trunc_v4f32(<4 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -185,7 +185,7 @@ define <4 x float> @trunc_v4f32(<4 x float> %x) {
 }
 declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
 
-define <8 x float> @trunc_v8f32(<8 x float> %x) {
+define <8 x float> @trunc_v8f32(<8 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -205,7 +205,7 @@ define <8 x float> @trunc_v8f32(<8 x float> %x) {
 }
 declare <8 x float> @llvm.experimental.constrained.trunc.v8f32(<8 x float>, metadata)
 
-define <16 x float> @trunc_v16f32(<16 x float> %x) {
+define <16 x float> @trunc_v16f32(<16 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -225,7 +225,7 @@ define <16 x float> @trunc_v16f32(<16 x float> %x) {
 }
 declare <16 x float> @llvm.experimental.constrained.trunc.v16f32(<16 x float>, metadata)
 
-define <1 x double> @trunc_v1f64(<1 x double> %x) {
+define <1 x double> @trunc_v1f64(<1 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -245,7 +245,7 @@ define <1 x double> @trunc_v1f64(<1 x double> %x) {
 }
 declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
 
-define <2 x double> @trunc_v2f64(<2 x double> %x) {
+define <2 x double> @trunc_v2f64(<2 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -265,7 +265,7 @@ define <2 x double> @trunc_v2f64(<2 x double> %x) {
 }
 declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
 
-define <4 x double> @trunc_v4f64(<4 x double> %x) {
+define <4 x double> @trunc_v4f64(<4 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -285,7 +285,7 @@ define <4 x double> @trunc_v4f64(<4 x double> %x) {
 }
 declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata)
 
-define <8 x double> @trunc_v8f64(<8 x double> %x) {
+define <8 x double> @trunc_v8f64(<8 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
index e8ad1058a6d3fa..7c4682bac4b1f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half>, <2 x half>, metadata, metadata)
-define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <2 x half> %vc
 }
 
-define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) {
+define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.fadd.v4f16(<4 x half>, <4 x half>, metadata, metadata)
-define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <4 x half> %vc
 }
 
-define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) {
+define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.fadd.v8f16(<8 x half>, <8 x half>, metadata, metadata)
-define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <8 x half> %vc
 }
 
-define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) {
+define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.fadd.v16f16(<16 x half>, <16 x half>, metadata, metadata)
-define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -88,7 +88,7 @@ entry:
   ret <16 x half> %vc
 }
 
-define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) {
+define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -101,7 +101,7 @@ define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.fadd.v32f16(<32 x half>, <32 x half>, metadata, metadata)
-define <32 x half> @vfadd_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+define <32 x half> @vfadd_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a0, 32
@@ -113,7 +113,7 @@ entry:
   ret <32 x half> %vc
 }
 
-define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) {
+define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -127,7 +127,7 @@ define <32 x half> @vfadd_vf_v32f16(<32 x half> %va, half %b) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata)
-define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -138,7 +138,7 @@ entry:
   ret <2 x float> %vc
 }
 
-define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) {
+define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -151,7 +151,7 @@ define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -162,7 +162,7 @@ entry:
   ret <4 x float> %vc
 }
 
-define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) {
+define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -175,7 +175,7 @@ define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata)
-define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -186,7 +186,7 @@ entry:
   ret <8 x float> %vc
 }
 
-define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) {
+define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -199,7 +199,7 @@ define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
-define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -210,7 +210,7 @@ entry:
   ret <16 x float> %vc
 }
 
-define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) {
+define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -223,7 +223,7 @@ define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -234,7 +234,7 @@ entry:
   ret <2 x double> %vc
 }
 
-define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) {
+define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -247,7 +247,7 @@ define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
-define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -258,7 +258,7 @@ entry:
   ret <4 x double> %vc
 }
 
-define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) {
+define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -271,7 +271,7 @@ define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata)
-define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_v8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -282,7 +282,7 @@ entry:
   ret <8 x double> %vc
 }
 
-define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b) {
+define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

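The vector-scalar (vf) tests above splat the scalar operand and then call
the constrained intrinsic, which for binary arithmetic also takes both
metadata operands. A minimal sketch of that pattern, with an illustrative
function name that does not appear in the patch:

  define <2 x float> @vfadd_vf_sketch(<2 x float> %va, float %b) strictfp {
    ; Splat the scalar into a vector, then perform the strict FP add.
    %head = insertelement <2 x float> poison, float %b, i32 0
    %splat = shufflevector <2 x float> %head, <2 x float> poison, <2 x i32> zeroinitializer
    %vc = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %va, <2 x float> %splat, metadata !"round.dynamic", metadata !"fpexcept.strict")
    ret <2 x float> %vc
  }
  declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata)
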
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
index 5f66edf64cd8a6..fb9612d0950401 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half>, <2 x half>, metadata, metadata)
-define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <2 x half> %vc
 }
 
-define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b) {
+define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.fdiv.v4f16(<4 x half>, <4 x half>, metadata, metadata)
-define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <4 x half> %vc
 }
 
-define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) {
+define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.fdiv.v8f16(<8 x half>, <8 x half>, metadata, metadata)
-define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <8 x half> %vc
 }
 
-define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b) {
+define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -76,7 +76,7 @@ define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b) {
   ret <8 x half> %vc
 }
 
-define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) {
+define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_fv_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -89,7 +89,7 @@ define <8 x half> @vfdiv_fv_v8f16(<8 x half> %va, half %b) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.fdiv.v16f16(<16 x half>, <16 x half>, metadata, metadata)
-define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -100,7 +100,7 @@ entry:
   ret <16 x half> %vc
 }
 
-define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) {
+define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.fdiv.v32f16(<32 x half>, <32 x half>, metadata, metadata)
-define <32 x half> @vfdiv_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+define <32 x half> @vfdiv_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a0, 32
@@ -125,7 +125,7 @@ entry:
   ret <32 x half> %vc
 }
 
-define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) {
+define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -139,7 +139,7 @@ define <32 x half> @vfdiv_vf_v32f16(<32 x half> %va, half %b) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float>, <2 x float>, metadata, metadata)
-define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -150,7 +150,7 @@ entry:
   ret <2 x float> %vc
 }
 
-define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) {
+define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -163,7 +163,7 @@ define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -174,7 +174,7 @@ entry:
   ret <4 x float> %vc
 }
 
-define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) {
+define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -187,7 +187,7 @@ define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata)
-define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -198,7 +198,7 @@ entry:
   ret <8 x float> %vc
 }
 
-define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b) {
+define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -210,7 +210,7 @@ define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b) {
   ret <8 x float> %vc
 }
 
-define <8 x float> @vfdiv_fv_v8f32(<8 x float> %va, float %b) {
+define <8 x float> @vfdiv_fv_v8f32(<8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_fv_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -223,7 +223,7 @@ define <8 x float> @vfdiv_fv_v8f32(<8 x float> %va, float %b) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata)
-define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -234,7 +234,7 @@ entry:
   ret <16 x float> %vc
 }
 
-define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) {
+define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -247,7 +247,7 @@ define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -258,7 +258,7 @@ entry:
   ret <2 x double> %vc
 }
 
-define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) {
+define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -271,7 +271,7 @@ define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
-define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -282,7 +282,7 @@ entry:
   ret <4 x double> %vc
 }
 
-define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) {
+define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -295,7 +295,7 @@ define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata)
-define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_v8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -306,7 +306,7 @@ entry:
   ret <8 x double> %vc
 }
 
-define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b) {
+define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -318,7 +318,7 @@ define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b) {
   ret <8 x double> %vc
 }
 
-define <8 x double> @vfdiv_fv_v8f64(<8 x double> %va, double %b) {
+define <8 x double> @vfdiv_fv_v8f64(<8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_fv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

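The _vf_ variants splat the scalar operand before the constrained operation. A hedged sketch of that shape, with the splat idiom assumed (the actual file may use poison or undef for the splat base):

  ; Sketch: scalar operand splatted, then a constrained fdiv, all under strictfp.
  declare <2 x half> @llvm.experimental.constrained.fdiv.v2f16(<2 x half>, <2 x half>, metadata, metadata)

  define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b) strictfp {
    %head = insertelement <2 x half> poison, half %b, i32 0
    %splat = shufflevector <2 x half> %head, <2 x half> poison, <2 x i32> zeroinitializer
    %vc = call <2 x half> @llvm.experimental.constrained.fdiv.v2f16(
               <2 x half> %va, <2 x half> %splat,
               metadata !"round.dynamic", metadata !"fpexcept.strict")
    ret <2 x half> %vc
  }
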
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll
index 0a43a66d99c2fc..52d96fc63fadfc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmadd-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata)
 
-define <2 x half> @vfmadd_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) {
+define <2 x half> @vfmadd_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -19,7 +19,7 @@ define <2 x half> @vfmadd_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %v
   ret <2 x half> %vd
 }
 
-define <2 x half> @vfmadd_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
+define <2 x half> @vfmadd_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -33,7 +33,7 @@ define <2 x half> @vfmadd_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
 
 declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata)
 
-define <4 x half> @vfmadd_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) {
+define <4 x half> @vfmadd_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -43,7 +43,7 @@ define <4 x half> @vfmadd_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %v
   ret <4 x half> %vd
 }
 
-define <4 x half> @vfmadd_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
+define <4 x half> @vfmadd_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -57,7 +57,7 @@ define <4 x half> @vfmadd_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
 
 declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata)
 
-define <8 x half> @vfmadd_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) {
+define <8 x half> @vfmadd_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -67,7 +67,7 @@ define <8 x half> @vfmadd_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %v
   ret <8 x half> %vd
 }
 
-define <8 x half> @vfmadd_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
+define <8 x half> @vfmadd_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -81,7 +81,7 @@ define <8 x half> @vfmadd_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
 
 declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata)
 
-define <16 x half> @vfmadd_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) {
+define <16 x half> @vfmadd_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -91,7 +91,7 @@ define <16 x half> @vfmadd_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x hal
   ret <16 x half> %vd
 }
 
-define <16 x half> @vfmadd_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) {
+define <16 x half> @vfmadd_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -105,7 +105,7 @@ define <16 x half> @vfmadd_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c)
 
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 
-define <32 x half> @vfmadd_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) {
+define <32 x half> @vfmadd_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -116,7 +116,7 @@ define <32 x half> @vfmadd_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x hal
   ret <32 x half> %vd
 }
 
-define <32 x half> @vfmadd_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) {
+define <32 x half> @vfmadd_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -131,7 +131,7 @@ define <32 x half> @vfmadd_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c)
 
 declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata)
 
-define <2 x float> @vfmadd_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) {
+define <2 x float> @vfmadd_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -141,7 +141,7 @@ define <2 x float> @vfmadd_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float
   ret <2 x float> %vd
 }
 
-define <2 x float> @vfmadd_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) {
+define <2 x float> @vfmadd_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -155,7 +155,7 @@ define <2 x float> @vfmadd_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c)
 
 declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
 
-define <4 x float> @vfmadd_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) {
+define <4 x float> @vfmadd_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -165,7 +165,7 @@ define <4 x float> @vfmadd_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float
   ret <4 x float> %vd
 }
 
-define <4 x float> @vfmadd_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) {
+define <4 x float> @vfmadd_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -179,7 +179,7 @@ define <4 x float> @vfmadd_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c)
 
 declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
 
-define <8 x float> @vfmadd_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) {
+define <8 x float> @vfmadd_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -189,7 +189,7 @@ define <8 x float> @vfmadd_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float
   ret <8 x float> %vd
 }
 
-define <8 x float> @vfmadd_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) {
+define <8 x float> @vfmadd_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -203,7 +203,7 @@ define <8 x float> @vfmadd_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c)
 
 declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata)
 
-define <16 x float> @vfmadd_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
+define <16 x float> @vfmadd_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -213,7 +213,7 @@ define <16 x float> @vfmadd_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x
   ret <16 x float> %vd
 }
 
-define <16 x float> @vfmadd_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) {
+define <16 x float> @vfmadd_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -227,7 +227,7 @@ define <16 x float> @vfmadd_vf_v16f32(<16 x float> %va, <16 x float> %vb, float
 
 declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
 
-define <2 x double> @vfmadd_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) {
+define <2 x double> @vfmadd_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -237,7 +237,7 @@ define <2 x double> @vfmadd_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x do
   ret <2 x double> %vd
 }
 
-define <2 x double> @vfmadd_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) {
+define <2 x double> @vfmadd_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -251,7 +251,7 @@ define <2 x double> @vfmadd_vf_v2f64(<2 x double> %va, <2 x double> %vb, double
 
 declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
 
-define <4 x double> @vfmadd_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) {
+define <4 x double> @vfmadd_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -261,7 +261,7 @@ define <4 x double> @vfmadd_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x do
   ret <4 x double> %vd
 }
 
-define <4 x double> @vfmadd_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) {
+define <4 x double> @vfmadd_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -275,7 +275,7 @@ define <4 x double> @vfmadd_vf_v4f64(<4 x double> %va, <4 x double> %vb, double
 
 declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata)
 
-define <8 x double> @vfmadd_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) {
+define <8 x double> @vfmadd_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -285,7 +285,7 @@ define <8 x double> @vfmadd_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x do
   ret <8 x double> %vd
 }
 
-define <8 x double> @vfmadd_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) {
+define <8 x double> @vfmadd_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

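The fma-based files use the three-operand constrained intrinsic, and the same attribute rule applies. A minimal sketch (body assumed, not quoted from the file):

  ; Sketch: three-operand constrained fma under strictfp.
  declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata)

  define <2 x half> @vfmadd_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp {
    %vd = call <2 x half> @llvm.experimental.constrained.fma.v2f16(
               <2 x half> %va, <2 x half> %vb, <2 x half> %vc,
               metadata !"round.dynamic", metadata !"fpexcept.strict")
    ret <2 x half> %vd
  }
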
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll
index 06fc97ca3314e4..652198b0d4469b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmsub-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata)
 
-define <2 x half> @vfmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) {
+define <2 x half> @vfmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -20,7 +20,7 @@ define <2 x half> @vfmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %v
   ret <2 x half> %vd
 }
 
-define <2 x half> @vfmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
+define <2 x half> @vfmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -35,7 +35,7 @@ define <2 x half> @vfmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
 
 declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata)
 
-define <4 x half> @vfmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) {
+define <4 x half> @vfmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -46,7 +46,7 @@ define <4 x half> @vfmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %v
   ret <4 x half> %vd
 }
 
-define <4 x half> @vfmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
+define <4 x half> @vfmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -61,7 +61,7 @@ define <4 x half> @vfmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
 
 declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata)
 
-define <8 x half> @vfmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) {
+define <8 x half> @vfmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -72,7 +72,7 @@ define <8 x half> @vfmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %v
   ret <8 x half> %vd
 }
 
-define <8 x half> @vfmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
+define <8 x half> @vfmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -87,7 +87,7 @@ define <8 x half> @vfmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
 
 declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata)
 
-define <16 x half> @vfmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) {
+define <16 x half> @vfmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -98,7 +98,7 @@ define <16 x half> @vfmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x hal
   ret <16 x half> %vd
 }
 
-define <16 x half> @vfmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) {
+define <16 x half> @vfmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <16 x half> @vfmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c)
 
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 
-define <32 x half> @vfmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) {
+define <32 x half> @vfmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -125,7 +125,7 @@ define <32 x half> @vfmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x hal
   ret <32 x half> %vd
 }
 
-define <32 x half> @vfmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) {
+define <32 x half> @vfmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -141,7 +141,7 @@ define <32 x half> @vfmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c)
 
 declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata)
 
-define <2 x float> @vfmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) {
+define <2 x float> @vfmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -152,7 +152,7 @@ define <2 x float> @vfmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float
   ret <2 x float> %vd
 }
 
-define <2 x float> @vfmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) {
+define <2 x float> @vfmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -167,7 +167,7 @@ define <2 x float> @vfmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c)
 
 declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
 
-define <4 x float> @vfmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) {
+define <4 x float> @vfmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -178,7 +178,7 @@ define <4 x float> @vfmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float
   ret <4 x float> %vd
 }
 
-define <4 x float> @vfmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) {
+define <4 x float> @vfmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -193,7 +193,7 @@ define <4 x float> @vfmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c)
 
 declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
 
-define <8 x float> @vfmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) {
+define <8 x float> @vfmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -204,7 +204,7 @@ define <8 x float> @vfmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float
   ret <8 x float> %vd
 }
 
-define <8 x float> @vfmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) {
+define <8 x float> @vfmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -219,7 +219,7 @@ define <8 x float> @vfmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c)
 
 declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata)
 
-define <16 x float> @vfmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
+define <16 x float> @vfmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -230,7 +230,7 @@ define <16 x float> @vfmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x
   ret <16 x float> %vd
 }
 
-define <16 x float> @vfmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) {
+define <16 x float> @vfmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -245,7 +245,7 @@ define <16 x float> @vfmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float
 
 declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
 
-define <2 x double> @vfmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) {
+define <2 x double> @vfmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -256,7 +256,7 @@ define <2 x double> @vfmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x do
   ret <2 x double> %vd
 }
 
-define <2 x double> @vfmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) {
+define <2 x double> @vfmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -271,7 +271,7 @@ define <2 x double> @vfmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double
 
 declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
 
-define <4 x double> @vfmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) {
+define <4 x double> @vfmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -282,7 +282,7 @@ define <4 x double> @vfmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x do
   ret <4 x double> %vd
 }
 
-define <4 x double> @vfmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) {
+define <4 x double> @vfmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -297,7 +297,7 @@ define <4 x double> @vfmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double
 
 declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata)
 
-define <8 x double> @vfmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) {
+define <8 x double> @vfmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -308,7 +308,7 @@ define <8 x double> @vfmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x do
   ret <8 x double> %vd
 }
 
-define <8 x double> @vfmsub_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) {
+define <8 x double> @vfmsub_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
index fe1c4562b5b96c..d5e96c88f93887 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <1 x half> @llvm.experimental.constrained.fmul.v1f16(<1 x half>, <1 x half>, metadata, metadata)
-define <1 x half> @vfmul_vv_v1f16(<1 x half> %va, <1 x half> %vb) {
+define <1 x half> @vfmul_vv_v1f16(<1 x half> %va, <1 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <1 x half> %vc
 }
 
-define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) {
+define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <1 x half> @vfmul_vf_v1f16(<1 x half> %va, half %b) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.fmul.v2f16(<2 x half>, <2 x half>, metadata, metadata)
-define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <2 x half> %vc
 }
 
-define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) {
+define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -53,7 +53,7 @@ define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.fmul.v4f16(<4 x half>, <4 x half>, metadata, metadata)
-define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <4 x half> %vc
 }
 
-define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) {
+define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -77,7 +77,7 @@ define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.fmul.v8f16(<8 x half>, <8 x half>, metadata, metadata)
-define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -88,7 +88,7 @@ entry:
   ret <8 x half> %vc
 }
 
-define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) {
+define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -101,7 +101,7 @@ define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.fmul.v16f16(<16 x half>, <16 x half>, metadata, metadata)
-define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -112,7 +112,7 @@ entry:
   ret <16 x half> %vc
 }
 
-define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) {
+define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -125,7 +125,7 @@ define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.fmul.v32f16(<32 x half>, <32 x half>, metadata, metadata)
-define <32 x half> @vfmul_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+define <32 x half> @vfmul_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a0, 32
@@ -137,7 +137,7 @@ entry:
   ret <32 x half> %vc
 }
 
-define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) {
+define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -151,7 +151,7 @@ define <32 x half> @vfmul_vf_v32f16(<32 x half> %va, half %b) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.fmul.v1f32(<1 x float>, <1 x float>, metadata, metadata)
-define <1 x float> @vfmul_vv_v1f32(<1 x float> %va, <1 x float> %vb) {
+define <1 x float> @vfmul_vv_v1f32(<1 x float> %va, <1 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -162,7 +162,7 @@ entry:
   ret <1 x float> %vc
 }
 
-define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) {
+define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -175,7 +175,7 @@ define <1 x float> @vfmul_vf_v1f32(<1 x float> %va, float %b) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float>, <2 x float>, metadata, metadata)
-define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -186,7 +186,7 @@ entry:
   ret <2 x float> %vc
 }
 
-define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) {
+define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -199,7 +199,7 @@ define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -210,7 +210,7 @@ entry:
   ret <4 x float> %vc
 }
 
-define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) {
+define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -223,7 +223,7 @@ define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata)
-define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -234,7 +234,7 @@ entry:
   ret <8 x float> %vc
 }
 
-define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) {
+define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -247,7 +247,7 @@ define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata)
-define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -258,7 +258,7 @@ entry:
   ret <16 x float> %vc
 }
 
-define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b) {
+define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -271,7 +271,7 @@ define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
-define <1 x double> @vfmul_vv_v1f64(<1 x double> %va, <1 x double> %vb) {
+define <1 x double> @vfmul_vv_v1f64(<1 x double> %va, <1 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -282,7 +282,7 @@ entry:
   ret <1 x double> %vc
 }
 
-define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) {
+define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -295,7 +295,7 @@ define <1 x double> @vfmul_vf_v1f64(<1 x double> %va, double %b) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -306,7 +306,7 @@ entry:
   ret <2 x double> %vc
 }
 
-define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) {
+define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -319,7 +319,7 @@ define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
-define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -330,7 +330,7 @@ entry:
   ret <4 x double> %vc
 }
 
-define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) {
+define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -343,7 +343,7 @@ define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata)
-define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_v8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -354,7 +354,7 @@ entry:
   ret <8 x double> %vc
 }
 
-define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b) {
+define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll
index d5b7b1f5d240ca..b7f5dd49b3508c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmadd-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata)
 
-define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) {
+define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -21,7 +21,7 @@ define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %
   ret <2 x half> %vd
 }
 
-define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
+define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -37,7 +37,7 @@ define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
 
 declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata)
 
-define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) {
+define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -49,7 +49,7 @@ define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %
   ret <4 x half> %vd
 }
 
-define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
+define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -65,7 +65,7 @@ define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
 
 declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata)
 
-define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) {
+define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %
   ret <8 x half> %vd
 }
 
-define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
+define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -93,7 +93,7 @@ define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
 
 declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata)
 
-define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) {
+define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -105,7 +105,7 @@ define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x ha
   ret <16 x half> %vd
 }
 
-define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) {
+define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -121,7 +121,7 @@ define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c)
 
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 
-define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) {
+define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -134,7 +134,7 @@ define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x ha
   ret <32 x half> %vd
 }
 
-define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) {
+define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -151,7 +151,7 @@ define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c)
 
 declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata)
 
-define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) {
+define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -163,7 +163,7 @@ define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x floa
   ret <2 x float> %vd
 }
 
-define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) {
+define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -179,7 +179,7 @@ define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c)
 
 declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
 
-define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) {
+define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -191,7 +191,7 @@ define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x floa
   ret <4 x float> %vd
 }
 
-define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) {
+define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -207,7 +207,7 @@ define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c)
 
 declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
 
-define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) {
+define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -219,7 +219,7 @@ define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x floa
   ret <8 x float> %vd
 }
 
-define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) {
+define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -235,7 +235,7 @@ define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c)
 
 declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata)
 
-define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
+define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -247,7 +247,7 @@ define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x
   ret <16 x float> %vd
 }
 
-define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) {
+define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -263,7 +263,7 @@ define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float
 
 declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
 
-define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) {
+define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -275,7 +275,7 @@ define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x d
   ret <2 x double> %vd
 }
 
-define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) {
+define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -291,7 +291,7 @@ define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double
 
 declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
 
-define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) {
+define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -303,7 +303,7 @@ define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x d
   ret <4 x double> %vd
 }
 
-define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) {
+define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -319,7 +319,7 @@ define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double
 
 declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata)
 
-define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) {
+define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -331,7 +331,7 @@ define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x d
   ret <8 x double> %vd
 }
 
-define <8 x double> @vfnmsub_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) {
+define <8 x double> @vfnmsub_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

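The vfnmadd/vfnmsub tests differ only in feeding fneg results into the same constrained fma. A hedged sketch; which operands are negated here is assumed, not taken from the hunks:

  ; Sketch: fneg feeding a constrained fma under strictfp (negation placement assumed).
  declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata)

  define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp {
    %neg = fneg <2 x half> %va
    %neg2 = fneg <2 x half> %vc
    %vd = call <2 x half> @llvm.experimental.constrained.fma.v2f16(
               <2 x half> %neg, <2 x half> %vb, <2 x half> %neg2,
               metadata !"round.dynamic", metadata !"fpexcept.strict")
    ret <2 x half> %vd
  }
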
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll
index 95bba28c7cec96..ace96c1a571d1d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfnmsub-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <2 x half> @llvm.experimental.constrained.fma.v2f16(<2 x half>, <2 x half>, <2 x half>, metadata, metadata)
 
-define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) {
+define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -20,7 +20,7 @@ define <2 x half> @vfnmsub_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %
   ret <2 x half> %vd
 }
 
-define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
+define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -35,7 +35,7 @@ define <2 x half> @vfnmsub_vf_v2f16(<2 x half> %va, <2 x half> %vb, half %c) {
 
 declare <4 x half> @llvm.experimental.constrained.fma.v4f16(<4 x half>, <4 x half>, <4 x half>, metadata, metadata)
 
-define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) {
+define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -46,7 +46,7 @@ define <4 x half> @vfnmsub_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %
   ret <4 x half> %vd
 }
 
-define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
+define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -61,7 +61,7 @@ define <4 x half> @vfnmsub_vf_v4f16(<4 x half> %va, <4 x half> %vb, half %c) {
 
 declare <8 x half> @llvm.experimental.constrained.fma.v8f16(<8 x half>, <8 x half>, <8 x half>, metadata, metadata)
 
-define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) {
+define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -72,7 +72,7 @@ define <8 x half> @vfnmsub_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %
   ret <8 x half> %vd
 }
 
-define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
+define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -87,7 +87,7 @@ define <8 x half> @vfnmsub_vf_v8f16(<8 x half> %va, <8 x half> %vb, half %c) {
 
 declare <16 x half> @llvm.experimental.constrained.fma.v16f16(<16 x half>, <16 x half>, <16 x half>, metadata, metadata)
 
-define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) {
+define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -98,7 +98,7 @@ define <16 x half> @vfnmsub_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x ha
   ret <16 x half> %vd
 }
 
-define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) {
+define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <16 x half> @vfnmsub_vf_v16f16(<16 x half> %va, <16 x half> %vb, half %c)
 
 declare <32 x half> @llvm.experimental.constrained.fma.v32f16(<32 x half>, <32 x half>, <32 x half>, metadata, metadata)
 
-define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) {
+define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -125,7 +125,7 @@ define <32 x half> @vfnmsub_vv_v32f16(<32 x half> %va, <32 x half> %vb, <32 x ha
   ret <32 x half> %vd
 }
 
-define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) {
+define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -141,7 +141,7 @@ define <32 x half> @vfnmsub_vf_v32f16(<32 x half> %va, <32 x half> %vb, half %c)
 
 declare <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float>, <2 x float>, <2 x float>, metadata, metadata)
 
-define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) {
+define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -152,7 +152,7 @@ define <2 x float> @vfnmsub_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x floa
   ret <2 x float> %vd
 }
 
-define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) {
+define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -167,7 +167,7 @@ define <2 x float> @vfnmsub_vf_v2f32(<2 x float> %va, <2 x float> %vb, float %c)
 
 declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
 
-define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) {
+define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -178,7 +178,7 @@ define <4 x float> @vfnmsub_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x floa
   ret <4 x float> %vd
 }
 
-define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) {
+define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -193,7 +193,7 @@ define <4 x float> @vfnmsub_vf_v4f32(<4 x float> %va, <4 x float> %vb, float %c)
 
 declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)
 
-define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) {
+define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -204,7 +204,7 @@ define <8 x float> @vfnmsub_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x floa
   ret <8 x float> %vd
 }
 
-define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) {
+define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -219,7 +219,7 @@ define <8 x float> @vfnmsub_vf_v8f32(<8 x float> %va, <8 x float> %vb, float %c)
 
 declare <16 x float> @llvm.experimental.constrained.fma.v16f32(<16 x float>, <16 x float>, <16 x float>, metadata, metadata)
 
-define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
+define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -230,7 +230,7 @@ define <16 x float> @vfnmsub_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x
   ret <16 x float> %vd
 }
 
-define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) {
+define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -245,7 +245,7 @@ define <16 x float> @vfnmsub_vf_v16f32(<16 x float> %va, <16 x float> %vb, float
 
 declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
 
-define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) {
+define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -256,7 +256,7 @@ define <2 x double> @vfnmsub_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x d
   ret <2 x double> %vd
 }
 
-define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) {
+define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -271,7 +271,7 @@ define <2 x double> @vfnmsub_vf_v2f64(<2 x double> %va, <2 x double> %vb, double
 
 declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
 
-define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) {
+define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -282,7 +282,7 @@ define <4 x double> @vfnmsub_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x d
   ret <4 x double> %vd
 }
 
-define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) {
+define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -297,7 +297,7 @@ define <4 x double> @vfnmsub_vf_v4f64(<4 x double> %va, <4 x double> %vb, double
 
 declare <8 x double> @llvm.experimental.constrained.fma.v8f64(<8 x double>, <8 x double>, <8 x double>, metadata, metadata)
 
-define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) {
+define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -308,7 +308,7 @@ define <8 x double> @vfnmsub_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x d
   ret <8 x double> %vd
 }
 
-define <8 x double> @vfnmsub_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) {
+define <8 x double> @vfnmsub_vf_v8f64(<8 x double> %va, <8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll
index d853f6a531d8f4..5a89fb0c510674 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfpext-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half>, metadata)
-define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) {
+define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_v2f16_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -17,7 +17,7 @@ define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f16(<2 x half>, metadata)
-define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) {
+define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_v2f16_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -30,7 +30,7 @@ define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half>, metadata)
-define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) {
+define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_v4f16_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -42,7 +42,7 @@ define <4 x float> @vfpext_v4f16_v4f32(<4 x half> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata)
-define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) {
+define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_v4f16_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -55,7 +55,7 @@ define <4 x double> @vfpext_v4f16_v4f64(<4 x half> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata)
-define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) {
+define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_v8f16_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -67,7 +67,7 @@ define <8 x float> @vfpext_v8f16_v8f32(<8 x half> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half>, metadata)
-define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) {
+define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_v8f16_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -80,7 +80,7 @@ define <8 x double> @vfpext_v8f16_v8f64(<8 x half> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)
-define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) {
+define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_v2f32_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -92,7 +92,7 @@ define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f32(<4 x float>, metadata)
-define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) {
+define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_v4f32_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -104,7 +104,7 @@ define <4 x double> @vfpext_v4f32_v4f64(<4 x float> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f32(<8 x float>, metadata)
-define <8 x double> @vfpext_v8f32_v8f64(<8 x float> %va) {
+define <8 x double> @vfpext_v8f32_v8f64(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_v8f32_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
index 08304c7e7f558f..fe0c315d9717bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f16(<1 x half>, metadata)
-define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) {
+define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f16_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -18,7 +18,7 @@ define <1 x i1> @vfptosi_v1f16_v1i1(<1 x half> %va) {
 }
 
 declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f16(<1 x half>, metadata)
-define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) {
+define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f16_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -31,7 +31,7 @@ define <1 x i1> @vfptoui_v1f16_v1i1(<1 x half> %va) {
 }
 
 declare <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half>, metadata)
-define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) {
+define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) strictfp {
 ; RV32-LABEL: vfptosi_v1f16_v1i7:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
@@ -50,7 +50,7 @@ define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) {
 }
 
 declare <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half>, metadata)
-define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) {
+define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) strictfp {
 ; RV32-LABEL: vfptoui_v1f16_v1i7:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 0, e16, mf4, ta, ma
@@ -69,7 +69,7 @@ define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) {
 }
 
 declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f16(<1 x half>, metadata)
-define <1 x i8> @vfptosi_v1f16_v1i8(<1 x half> %va) {
+define <1 x i8> @vfptosi_v1f16_v1i8(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f16_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -81,7 +81,7 @@ define <1 x i8> @vfptosi_v1f16_v1i8(<1 x half> %va) {
 }
 
 declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f16(<1 x half>, metadata)
-define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) {
+define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f16_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -93,7 +93,7 @@ define <1 x i8> @vfptoui_v1f16_v1i8(<1 x half> %va) {
 }
 
 declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f16(<1 x half>, metadata)
-define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) {
+define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f16_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -104,7 +104,7 @@ define <1 x i16> @vfptosi_v1f16_v1i16(<1 x half> %va) {
 }
 
 declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f16(<1 x half>, metadata)
-define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) {
+define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f16_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -115,7 +115,7 @@ define <1 x i16> @vfptoui_v1f16_v1i16(<1 x half> %va) {
 }
 
 declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f16(<1 x half>, metadata)
-define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) {
+define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f16_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -127,7 +127,7 @@ define <1 x i32> @vfptosi_v1f16_v1i32(<1 x half> %va) {
 }
 
 declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f16(<1 x half>, metadata)
-define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) {
+define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f16_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -139,7 +139,7 @@ define <1 x i32> @vfptoui_v1f16_v1i32(<1 x half> %va) {
 }
 
 declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f16(<1 x half>, metadata)
-define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) {
+define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f16_v1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -152,7 +152,7 @@ define <1 x i64> @vfptosi_v1f16_v1i64(<1 x half> %va) {
 }
 
 declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f16(<1 x half>, metadata)
-define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) {
+define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f16_v1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -165,7 +165,7 @@ define <1 x i64> @vfptoui_v1f16_v1i64(<1 x half> %va) {
 }
 
 declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f16(<2 x half>, metadata)
-define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) {
+define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f16_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -178,7 +178,7 @@ define <2 x i1> @vfptosi_v2f16_v2i1(<2 x half> %va) {
 }
 
 declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f16(<2 x half>, metadata)
-define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) {
+define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f16_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -191,7 +191,7 @@ define <2 x i1> @vfptoui_v2f16_v2i1(<2 x half> %va) {
 }
 
 declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f16(<2 x half>, metadata)
-define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) {
+define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f16_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -203,7 +203,7 @@ define <2 x i8> @vfptosi_v2f16_v2i8(<2 x half> %va) {
 }
 
 declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f16(<2 x half>, metadata)
-define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) {
+define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f16_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -215,7 +215,7 @@ define <2 x i8> @vfptoui_v2f16_v2i8(<2 x half> %va) {
 }
 
 declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f16(<2 x half>, metadata)
-define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) {
+define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f16_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -226,7 +226,7 @@ define <2 x i16> @vfptosi_v2f16_v2i16(<2 x half> %va) {
 }
 
 declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f16(<2 x half>, metadata)
-define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) {
+define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f16_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -237,7 +237,7 @@ define <2 x i16> @vfptoui_v2f16_v2i16(<2 x half> %va) {
 }
 
 declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f16(<2 x half>, metadata)
-define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) {
+define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f16_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -249,7 +249,7 @@ define <2 x i32> @vfptosi_v2f16_v2i32(<2 x half> %va) {
 }
 
 declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f16(<2 x half>, metadata)
-define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) {
+define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f16_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -261,7 +261,7 @@ define <2 x i32> @vfptoui_v2f16_v2i32(<2 x half> %va) {
 }
 
 declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f16(<2 x half>, metadata)
-define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) {
+define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f16_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -274,7 +274,7 @@ define <2 x i64> @vfptosi_v2f16_v2i64(<2 x half> %va) {
 }
 
 declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f16(<2 x half>, metadata)
-define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) {
+define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f16_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -287,7 +287,7 @@ define <2 x i64> @vfptoui_v2f16_v2i64(<2 x half> %va) {
 }
 
 declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f16(<4 x half>, metadata)
-define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) {
+define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f16_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -300,7 +300,7 @@ define <4 x i1> @vfptosi_v4f16_v4i1(<4 x half> %va) {
 }
 
 declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f16(<4 x half>, metadata)
-define <4 x i1> @vfptoui_v4f16_v4i1(<4 x half> %va) {
+define <4 x i1> @vfptoui_v4f16_v4i1(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f16_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -313,7 +313,7 @@ define <4 x i1> @vfptoui_v4f16_v4i1(<4 x half> %va) {
 }
 
 declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f16(<4 x half>, metadata)
-define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) {
+define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f16_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -325,7 +325,7 @@ define <4 x i8> @vfptosi_v4f16_v4i8(<4 x half> %va) {
 }
 
 declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f16(<4 x half>, metadata)
-define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) {
+define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f16_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -337,7 +337,7 @@ define <4 x i8> @vfptoui_v4f16_v4i8(<4 x half> %va) {
 }
 
 declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f16(<4 x half>, metadata)
-define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) {
+define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f16_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -348,7 +348,7 @@ define <4 x i16> @vfptosi_v4f16_v4i16(<4 x half> %va) {
 }
 
 declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f16(<4 x half>, metadata)
-define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) {
+define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f16_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -359,7 +359,7 @@ define <4 x i16> @vfptoui_v4f16_v4i16(<4 x half> %va) {
 }
 
 declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f16(<4 x half>, metadata)
-define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) {
+define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f16_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -371,7 +371,7 @@ define <4 x i32> @vfptosi_v4f16_v4i32(<4 x half> %va) {
 }
 
 declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f16(<4 x half>, metadata)
-define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) {
+define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f16_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -383,7 +383,7 @@ define <4 x i32> @vfptoui_v4f16_v4i32(<4 x half> %va) {
 }
 
 declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f16(<4 x half>, metadata)
-define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) {
+define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f16_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -396,7 +396,7 @@ define <4 x i64> @vfptosi_v4f16_v4i64(<4 x half> %va) {
 }
 
 declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f16(<4 x half>, metadata)
-define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) {
+define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f16_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -409,7 +409,7 @@ define <4 x i64> @vfptoui_v4f16_v4i64(<4 x half> %va) {
 }
 
 declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f16(<8 x half>, metadata)
-define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) {
+define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f16_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -422,7 +422,7 @@ define <8 x i1> @vfptosi_v8f16_v8i1(<8 x half> %va) {
 }
 
 declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f16(<8 x half>, metadata)
-define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) {
+define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f16_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -435,7 +435,7 @@ define <8 x i1> @vfptoui_v8f16_v8i1(<8 x half> %va) {
 }
 
 declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f16(<8 x half>, metadata)
-define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) {
+define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f16_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -447,7 +447,7 @@ define <8 x i8> @vfptosi_v8f16_v8i8(<8 x half> %va) {
 }
 
 declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f16(<8 x half>, metadata)
-define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) {
+define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f16_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -459,7 +459,7 @@ define <8 x i8> @vfptoui_v8f16_v8i8(<8 x half> %va) {
 }
 
 declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f16(<8 x half>, metadata)
-define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) {
+define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f16_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -470,7 +470,7 @@ define <8 x i16> @vfptosi_v8f16_v8i16(<8 x half> %va) {
 }
 
 declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f16(<8 x half>, metadata)
-define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) {
+define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f16_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -481,7 +481,7 @@ define <8 x i16> @vfptoui_v8f16_v8i16(<8 x half> %va) {
 }
 
 declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f16(<8 x half>, metadata)
-define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) {
+define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f16_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -493,7 +493,7 @@ define <8 x i32> @vfptosi_v8f16_v8i32(<8 x half> %va) {
 }
 
 declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f16(<8 x half>, metadata)
-define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) {
+define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f16_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -505,7 +505,7 @@ define <8 x i32> @vfptoui_v8f16_v8i32(<8 x half> %va) {
 }
 
 declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f16(<8 x half>, metadata)
-define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) {
+define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f16_v8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -518,7 +518,7 @@ define <8 x i64> @vfptosi_v8f16_v8i64(<8 x half> %va) {
 }
 
 declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f16(<8 x half>, metadata)
-define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) {
+define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f16_v8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -531,7 +531,7 @@ define <8 x i64> @vfptoui_v8f16_v8i64(<8 x half> %va) {
 }
 
 declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f16(<16 x half>, metadata)
-define <16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) {
+define <16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f16_v16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -544,7 +544,7 @@ define <16 x i1> @vfptosi_v16f16_v16i1(<16 x half> %va) {
 }
 
 declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f16(<16 x half>, metadata)
-define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) {
+define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f16_v16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -557,7 +557,7 @@ define <16 x i1> @vfptoui_v16f16_v16i1(<16 x half> %va) {
 }
 
 declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f16(<16 x half>, metadata)
-define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) {
+define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f16_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -569,7 +569,7 @@ define <16 x i8> @vfptosi_v16f16_v16i8(<16 x half> %va) {
 }
 
 declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f16(<16 x half>, metadata)
-define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) {
+define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f16_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -581,7 +581,7 @@ define <16 x i8> @vfptoui_v16f16_v16i8(<16 x half> %va) {
 }
 
 declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f16(<16 x half>, metadata)
-define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) {
+define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f16_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -592,7 +592,7 @@ define <16 x i16> @vfptosi_v16f16_v16i16(<16 x half> %va) {
 }
 
 declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f16(<16 x half>, metadata)
-define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) {
+define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f16_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -603,7 +603,7 @@ define <16 x i16> @vfptoui_v16f16_v16i16(<16 x half> %va) {
 }
 
 declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f16(<16 x half>, metadata)
-define <16 x i32> @vfptosi_v16f16_v16i32(<16 x half> %va) {
+define <16 x i32> @vfptosi_v16f16_v16i32(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f16_v16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -615,7 +615,7 @@ define <16 x i32> @vfptosi_v16f16_v16i32(<16 x half> %va) {
 }
 
 declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f16(<16 x half>, metadata)
-define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) {
+define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f16_v16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -627,7 +627,7 @@ define <16 x i32> @vfptoui_v16f16_v16i32(<16 x half> %va) {
 }
 
 declare <32 x i1> @llvm.experimental.constrained.fptosi.v32i1.v32f16(<32 x half>, metadata)
-define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) {
+define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v32f16_v32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -641,7 +641,7 @@ define <32 x i1> @vfptosi_v32f16_v32i1(<32 x half> %va) {
 }
 
 declare <32 x i1> @llvm.experimental.constrained.fptoui.v32i1.v32f16(<32 x half>, metadata)
-define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) {
+define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v32f16_v32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -655,7 +655,7 @@ define <32 x i1> @vfptoui_v32f16_v32i1(<32 x half> %va) {
 }
 
 declare <32 x i8> @llvm.experimental.constrained.fptosi.v32i8.v32f16(<32 x half>, metadata)
-define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) {
+define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v32f16_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -668,7 +668,7 @@ define <32 x i8> @vfptosi_v32f16_v32i8(<32 x half> %va) {
 }
 
 declare <32 x i8> @llvm.experimental.constrained.fptoui.v32i8.v32f16(<32 x half>, metadata)
-define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) {
+define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v32f16_v32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -681,7 +681,7 @@ define <32 x i8> @vfptoui_v32f16_v32i8(<32 x half> %va) {
 }
 
 declare <32 x i16> @llvm.experimental.constrained.fptosi.v32i16.v32f16(<32 x half>, metadata)
-define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) {
+define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v32f16_v32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -693,7 +693,7 @@ define <32 x i16> @vfptosi_v32f16_v32i16(<32 x half> %va) {
 }
 
 declare <32 x i16> @llvm.experimental.constrained.fptoui.v32i16.v32f16(<32 x half>, metadata)
-define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) {
+define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v32f16_v32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -705,7 +705,7 @@ define <32 x i16> @vfptoui_v32f16_v32i16(<32 x half> %va) {
 }
 
 declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f32(<1 x float>, metadata)
-define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) {
+define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f32_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -718,7 +718,7 @@ define <1 x i1> @vfptosi_v1f32_v1i1(<1 x float> %va) {
 }
 
 declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f32(<1 x float>, metadata)
-define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) {
+define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f32_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -731,7 +731,7 @@ define <1 x i1> @vfptoui_v1f32_v1i1(<1 x float> %va) {
 }
 
 declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f32(<1 x float>, metadata)
-define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) {
+define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f32_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -744,7 +744,7 @@ define <1 x i8> @vfptosi_v1f32_v1i8(<1 x float> %va) {
 }
 
 declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f32(<1 x float>, metadata)
-define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) {
+define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f32_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -757,7 +757,7 @@ define <1 x i8> @vfptoui_v1f32_v1i8(<1 x float> %va) {
 }
 
 declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f32(<1 x float>, metadata)
-define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) {
+define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f32_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -769,7 +769,7 @@ define <1 x i16> @vfptosi_v1f32_v1i16(<1 x float> %va) {
 }
 
 declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f32(<1 x float>, metadata)
-define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) {
+define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f32_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -781,7 +781,7 @@ define <1 x i16> @vfptoui_v1f32_v1i16(<1 x float> %va) {
 }
 
 declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f32(<1 x float>, metadata)
-define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) {
+define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f32_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -792,7 +792,7 @@ define <1 x i32> @vfptosi_v1f32_v1i32(<1 x float> %va) {
 }
 
 declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f32(<1 x float>, metadata)
-define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) {
+define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f32_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -803,7 +803,7 @@ define <1 x i32> @vfptoui_v1f32_v1i32(<1 x float> %va) {
 }
 
 declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f32(<1 x float>, metadata)
-define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) {
+define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f32_v1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -815,7 +815,7 @@ define <1 x i64> @vfptosi_v1f32_v1i64(<1 x float> %va) {
 }
 
 declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f32(<1 x float>, metadata)
-define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) {
+define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f32_v1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -827,7 +827,7 @@ define <1 x i64> @vfptoui_v1f32_v1i64(<1 x float> %va) {
 }
 
 declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f32(<2 x float>, metadata)
-define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) {
+define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f32_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -840,7 +840,7 @@ define <2 x i1> @vfptosi_v2f32_v2i1(<2 x float> %va) {
 }
 
 declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f32(<2 x float>, metadata)
-define <2 x i1> @vfptoui_v2f32_v2i1(<2 x float> %va) {
+define <2 x i1> @vfptoui_v2f32_v2i1(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f32_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -853,7 +853,7 @@ define <2 x i1> @vfptoui_v2f32_v2i1(<2 x float> %va) {
 }
 
 declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f32(<2 x float>, metadata)
-define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) {
+define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -866,7 +866,7 @@ define <2 x i8> @vfptosi_v2f32_v2i8(<2 x float> %va) {
 }
 
 declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f32(<2 x float>, metadata)
-define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) {
+define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f32_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -879,7 +879,7 @@ define <2 x i8> @vfptoui_v2f32_v2i8(<2 x float> %va) {
 }
 
 declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f32(<2 x float>, metadata)
-define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) {
+define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f32_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -891,7 +891,7 @@ define <2 x i16> @vfptosi_v2f32_v2i16(<2 x float> %va) {
 }
 
 declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f32(<2 x float>, metadata)
-define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) {
+define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f32_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -903,7 +903,7 @@ define <2 x i16> @vfptoui_v2f32_v2i16(<2 x float> %va) {
 }
 
 declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f32(<2 x float>, metadata)
-define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) {
+define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f32_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -914,7 +914,7 @@ define <2 x i32> @vfptosi_v2f32_v2i32(<2 x float> %va) {
 }
 
 declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata)
-define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) {
+define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f32_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -925,7 +925,7 @@ define <2 x i32> @vfptoui_v2f32_v2i32(<2 x float> %va) {
 }
 
 declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f32(<2 x float>, metadata)
-define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) {
+define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f32_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -937,7 +937,7 @@ define <2 x i64> @vfptosi_v2f32_v2i64(<2 x float> %va) {
 }
 
 declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f32(<2 x float>, metadata)
-define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) {
+define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f32_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -949,7 +949,7 @@ define <2 x i64> @vfptoui_v2f32_v2i64(<2 x float> %va) {
 }
 
 declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f32(<4 x float>, metadata)
-define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) {
+define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f32_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -962,7 +962,7 @@ define <4 x i1> @vfptosi_v4f32_v4i1(<4 x float> %va) {
 }
 
 declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f32(<4 x float>, metadata)
-define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) {
+define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f32_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -975,7 +975,7 @@ define <4 x i1> @vfptoui_v4f32_v4i1(<4 x float> %va) {
 }
 
 declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f32(<4 x float>, metadata)
-define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) {
+define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f32_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -988,7 +988,7 @@ define <4 x i8> @vfptosi_v4f32_v4i8(<4 x float> %va) {
 }
 
 declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f32(<4 x float>, metadata)
-define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) {
+define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f32_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -1001,7 +1001,7 @@ define <4 x i8> @vfptoui_v4f32_v4i8(<4 x float> %va) {
 }
 
 declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f32(<4 x float>, metadata)
-define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) {
+define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f32_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -1013,7 +1013,7 @@ define <4 x i16> @vfptosi_v4f32_v4i16(<4 x float> %va) {
 }
 
 declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f32(<4 x float>, metadata)
-define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) {
+define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f32_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -1025,7 +1025,7 @@ define <4 x i16> @vfptoui_v4f32_v4i16(<4 x float> %va) {
 }
 
 declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
-define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) {
+define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f32_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1036,7 +1036,7 @@ define <4 x i32> @vfptosi_v4f32_v4i32(<4 x float> %va) {
 }
 
 declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
-define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) {
+define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f32_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1047,7 +1047,7 @@ define <4 x i32> @vfptoui_v4f32_v4i32(<4 x float> %va) {
 }
 
 declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
-define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) {
+define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f32_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1059,7 +1059,7 @@ define <4 x i64> @vfptosi_v4f32_v4i64(<4 x float> %va) {
 }
 
 declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
-define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) {
+define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f32_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1071,7 +1071,7 @@ define <4 x i64> @vfptoui_v4f32_v4i64(<4 x float> %va) {
 }
 
 declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f32(<8 x float>, metadata)
-define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) {
+define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f32_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1084,7 +1084,7 @@ define <8 x i1> @vfptosi_v8f32_v8i1(<8 x float> %va) {
 }
 
 declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f32(<8 x float>, metadata)
-define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) {
+define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f32_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1097,7 +1097,7 @@ define <8 x i1> @vfptoui_v8f32_v8i1(<8 x float> %va) {
 }
 
 declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f32(<8 x float>, metadata)
-define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) {
+define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f32_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1110,7 +1110,7 @@ define <8 x i8> @vfptosi_v8f32_v8i8(<8 x float> %va) {
 }
 
 declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f32(<8 x float>, metadata)
-define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) {
+define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f32_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1123,7 +1123,7 @@ define <8 x i8> @vfptoui_v8f32_v8i8(<8 x float> %va) {
 }
 
 declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f32(<8 x float>, metadata)
-define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) {
+define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f32_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1135,7 +1135,7 @@ define <8 x i16> @vfptosi_v8f32_v8i16(<8 x float> %va) {
 }
 
 declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f32(<8 x float>, metadata)
-define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) {
+define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f32_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1147,7 +1147,7 @@ define <8 x i16> @vfptoui_v8f32_v8i16(<8 x float> %va) {
 }
 
 declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f32(<8 x float>, metadata)
-define <8 x i32> @vfptosi_v8f32_v8i32(<8 x float> %va) {
+define <8 x i32> @vfptosi_v8f32_v8i32(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f32_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1158,7 +1158,7 @@ define <8 x i32> @vfptosi_v8f32_v8i32(<8 x float> %va) {
 }
 
 declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f32(<8 x float>, metadata)
-define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) {
+define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f32_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1169,7 +1169,7 @@ define <8 x i32> @vfptoui_v8f32_v8i32(<8 x float> %va) {
 }
 
 declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f32(<8 x float>, metadata)
-define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) {
+define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f32_v8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1181,7 +1181,7 @@ define <8 x i64> @vfptosi_v8f32_v8i64(<8 x float> %va) {
 }
 
 declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f32(<8 x float>, metadata)
-define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) {
+define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f32_v8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1193,7 +1193,7 @@ define <8 x i64> @vfptoui_v8f32_v8i64(<8 x float> %va) {
 }
 
 declare <16 x i1> @llvm.experimental.constrained.fptosi.v16i1.v16f32(<16 x float>, metadata)
-define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) {
+define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f32_v16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1206,7 +1206,7 @@ define <16 x i1> @vfptosi_v16f32_v16i1(<16 x float> %va) {
 }
 
 declare <16 x i1> @llvm.experimental.constrained.fptoui.v16i1.v16f32(<16 x float>, metadata)
-define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) {
+define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f32_v16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1219,7 +1219,7 @@ define <16 x i1> @vfptoui_v16f32_v16i1(<16 x float> %va) {
 }
 
 declare <16 x i8> @llvm.experimental.constrained.fptosi.v16i8.v16f32(<16 x float>, metadata)
-define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) {
+define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f32_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1232,7 +1232,7 @@ define <16 x i8> @vfptosi_v16f32_v16i8(<16 x float> %va) {
 }
 
 declare <16 x i8> @llvm.experimental.constrained.fptoui.v16i8.v16f32(<16 x float>, metadata)
-define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) {
+define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f32_v16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1245,7 +1245,7 @@ define <16 x i8> @vfptoui_v16f32_v16i8(<16 x float> %va) {
 }
 
 declare <16 x i16> @llvm.experimental.constrained.fptosi.v16i16.v16f32(<16 x float>, metadata)
-define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) {
+define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f32_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1257,7 +1257,7 @@ define <16 x i16> @vfptosi_v16f32_v16i16(<16 x float> %va) {
 }
 
 declare <16 x i16> @llvm.experimental.constrained.fptoui.v16i16.v16f32(<16 x float>, metadata)
-define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) {
+define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f32_v16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1269,7 +1269,7 @@ define <16 x i16> @vfptoui_v16f32_v16i16(<16 x float> %va) {
 }
 
 declare <16 x i32> @llvm.experimental.constrained.fptosi.v16i32.v16f32(<16 x float>, metadata)
-define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) {
+define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v16f32_v16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1280,7 +1280,7 @@ define <16 x i32> @vfptosi_v16f32_v16i32(<16 x float> %va) {
 }
 
 declare <16 x i32> @llvm.experimental.constrained.fptoui.v16i32.v16f32(<16 x float>, metadata)
-define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) {
+define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v16f32_v16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1291,7 +1291,7 @@ define <16 x i32> @vfptoui_v16f32_v16i32(<16 x float> %va) {
 }
 
 declare <1 x i1> @llvm.experimental.constrained.fptosi.v1i1.v1f64(<1 x double>, metadata)
-define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) {
+define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f64_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1304,7 +1304,7 @@ define <1 x i1> @vfptosi_v1f64_v1i1(<1 x double> %va) {
 }
 
 declare <1 x i1> @llvm.experimental.constrained.fptoui.v1i1.v1f64(<1 x double>, metadata)
-define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) {
+define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f64_v1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1317,7 +1317,7 @@ define <1 x i1> @vfptoui_v1f64_v1i1(<1 x double> %va) {
 }
 
 declare <1 x i8> @llvm.experimental.constrained.fptosi.v1i8.v1f64(<1 x double>, metadata)
-define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) {
+define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f64_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1332,7 +1332,7 @@ define <1 x i8> @vfptosi_v1f64_v1i8(<1 x double> %va) {
 }
 
 declare <1 x i8> @llvm.experimental.constrained.fptoui.v1i8.v1f64(<1 x double>, metadata)
-define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) {
+define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f64_v1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1347,7 +1347,7 @@ define <1 x i8> @vfptoui_v1f64_v1i8(<1 x double> %va) {
 }
 
 declare <1 x i16> @llvm.experimental.constrained.fptosi.v1i16.v1f64(<1 x double>, metadata)
-define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) {
+define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f64_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1360,7 +1360,7 @@ define <1 x i16> @vfptosi_v1f64_v1i16(<1 x double> %va) {
 }
 
 declare <1 x i16> @llvm.experimental.constrained.fptoui.v1i16.v1f64(<1 x double>, metadata)
-define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) {
+define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f64_v1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1373,7 +1373,7 @@ define <1 x i16> @vfptoui_v1f64_v1i16(<1 x double> %va) {
 }
 
 declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
-define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) {
+define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f64_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1385,7 +1385,7 @@ define <1 x i32> @vfptosi_v1f64_v1i32(<1 x double> %va) {
 }
 
 declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
-define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) {
+define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f64_v1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1397,7 +1397,7 @@ define <1 x i32> @vfptoui_v1f64_v1i32(<1 x double> %va) {
 }
 
 declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
-define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) {
+define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v1f64_v1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1408,7 +1408,7 @@ define <1 x i64> @vfptosi_v1f64_v1i64(<1 x double> %va) {
 }
 
 declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
-define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) {
+define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v1f64_v1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1419,7 +1419,7 @@ define <1 x i64> @vfptoui_v1f64_v1i64(<1 x double> %va) {
 }
 
 declare <2 x i1> @llvm.experimental.constrained.fptosi.v2i1.v2f64(<2 x double>, metadata)
-define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) {
+define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f64_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1432,7 +1432,7 @@ define <2 x i1> @vfptosi_v2f64_v2i1(<2 x double> %va) {
 }
 
 declare <2 x i1> @llvm.experimental.constrained.fptoui.v2i1.v2f64(<2 x double>, metadata)
-define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) {
+define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f64_v2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1445,7 +1445,7 @@ define <2 x i1> @vfptoui_v2f64_v2i1(<2 x double> %va) {
 }
 
 declare <2 x i8> @llvm.experimental.constrained.fptosi.v2i8.v2f64(<2 x double>, metadata)
-define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) {
+define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f64_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1460,7 +1460,7 @@ define <2 x i8> @vfptosi_v2f64_v2i8(<2 x double> %va) {
 }
 
 declare <2 x i8> @llvm.experimental.constrained.fptoui.v2i8.v2f64(<2 x double>, metadata)
-define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) {
+define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f64_v2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1475,7 +1475,7 @@ define <2 x i8> @vfptoui_v2f64_v2i8(<2 x double> %va) {
 }
 
 declare <2 x i16> @llvm.experimental.constrained.fptosi.v2i16.v2f64(<2 x double>, metadata)
-define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) {
+define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f64_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1488,7 +1488,7 @@ define <2 x i16> @vfptosi_v2f64_v2i16(<2 x double> %va) {
 }
 
 declare <2 x i16> @llvm.experimental.constrained.fptoui.v2i16.v2f64(<2 x double>, metadata)
-define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) {
+define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f64_v2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1501,7 +1501,7 @@ define <2 x i16> @vfptoui_v2f64_v2i16(<2 x double> %va) {
 }
 
 declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
-define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) {
+define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f64_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1513,7 +1513,7 @@ define <2 x i32> @vfptosi_v2f64_v2i32(<2 x double> %va) {
 }
 
 declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
-define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) {
+define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f64_v2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1525,7 +1525,7 @@ define <2 x i32> @vfptoui_v2f64_v2i32(<2 x double> %va) {
 }
 
 declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
-define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) {
+define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v2f64_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -1536,7 +1536,7 @@ define <2 x i64> @vfptosi_v2f64_v2i64(<2 x double> %va) {
 }
 
 declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
-define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) {
+define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v2f64_v2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -1547,7 +1547,7 @@ define <2 x i64> @vfptoui_v2f64_v2i64(<2 x double> %va) {
 }
 
 declare <4 x i1> @llvm.experimental.constrained.fptosi.v4i1.v4f64(<4 x double>, metadata)
-define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) {
+define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f64_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1560,7 +1560,7 @@ define <4 x i1> @vfptosi_v4f64_v4i1(<4 x double> %va) {
 }
 
 declare <4 x i1> @llvm.experimental.constrained.fptoui.v4i1.v4f64(<4 x double>, metadata)
-define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) {
+define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f64_v4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1573,7 +1573,7 @@ define <4 x i1> @vfptoui_v4f64_v4i1(<4 x double> %va) {
 }
 
 declare <4 x i8> @llvm.experimental.constrained.fptosi.v4i8.v4f64(<4 x double>, metadata)
-define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) {
+define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f64_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1588,7 +1588,7 @@ define <4 x i8> @vfptosi_v4f64_v4i8(<4 x double> %va) {
 }
 
 declare <4 x i8> @llvm.experimental.constrained.fptoui.v4i8.v4f64(<4 x double>, metadata)
-define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) {
+define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f64_v4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1603,7 +1603,7 @@ define <4 x i8> @vfptoui_v4f64_v4i8(<4 x double> %va) {
 }
 
 declare <4 x i16> @llvm.experimental.constrained.fptosi.v4i16.v4f64(<4 x double>, metadata)
-define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) {
+define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f64_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1616,7 +1616,7 @@ define <4 x i16> @vfptosi_v4f64_v4i16(<4 x double> %va) {
 }
 
 declare <4 x i16> @llvm.experimental.constrained.fptoui.v4i16.v4f64(<4 x double>, metadata)
-define <4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) {
+define <4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f64_v4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1629,7 +1629,7 @@ define <4 x i16> @vfptoui_v4f64_v4i16(<4 x double> %va) {
 }
 
 declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f64(<4 x double>, metadata)
-define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) {
+define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f64_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1641,7 +1641,7 @@ define <4 x i32> @vfptosi_v4f64_v4i32(<4 x double> %va) {
 }
 
 declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f64(<4 x double>, metadata)
-define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) {
+define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f64_v4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1653,7 +1653,7 @@ define <4 x i32> @vfptoui_v4f64_v4i32(<4 x double> %va) {
 }
 
 declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f64(<4 x double>, metadata)
-define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) {
+define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v4f64_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -1664,7 +1664,7 @@ define <4 x i64> @vfptosi_v4f64_v4i64(<4 x double> %va) {
 }
 
 declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f64(<4 x double>, metadata)
-define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) {
+define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v4f64_v4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -1675,7 +1675,7 @@ define <4 x i64> @vfptoui_v4f64_v4i64(<4 x double> %va) {
 }
 
 declare <8 x i1> @llvm.experimental.constrained.fptosi.v8i1.v8f64(<8 x double>, metadata)
-define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) {
+define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f64_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1688,7 +1688,7 @@ define <8 x i1> @vfptosi_v8f64_v8i1(<8 x double> %va) {
 }
 
 declare <8 x i1> @llvm.experimental.constrained.fptoui.v8i1.v8f64(<8 x double>, metadata)
-define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) {
+define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f64_v8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1701,7 +1701,7 @@ define <8 x i1> @vfptoui_v8f64_v8i1(<8 x double> %va) {
 }
 
 declare <8 x i8> @llvm.experimental.constrained.fptosi.v8i8.v8f64(<8 x double>, metadata)
-define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) {
+define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f64_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1716,7 +1716,7 @@ define <8 x i8> @vfptosi_v8f64_v8i8(<8 x double> %va) {
 }
 
 declare <8 x i8> @llvm.experimental.constrained.fptoui.v8i8.v8f64(<8 x double>, metadata)
-define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) {
+define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f64_v8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1731,7 +1731,7 @@ define <8 x i8> @vfptoui_v8f64_v8i8(<8 x double> %va) {
 }
 
 declare <8 x i16> @llvm.experimental.constrained.fptosi.v8i16.v8f64(<8 x double>, metadata)
-define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) {
+define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f64_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1744,7 +1744,7 @@ define <8 x i16> @vfptosi_v8f64_v8i16(<8 x double> %va) {
 }
 
 declare <8 x i16> @llvm.experimental.constrained.fptoui.v8i16.v8f64(<8 x double>, metadata)
-define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) {
+define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f64_v8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1757,7 +1757,7 @@ define <8 x i16> @vfptoui_v8f64_v8i16(<8 x double> %va) {
 }
 
 declare <8 x i32> @llvm.experimental.constrained.fptosi.v8i32.v8f64(<8 x double>, metadata)
-define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) {
+define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f64_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1769,7 +1769,7 @@ define <8 x i32> @vfptosi_v8f64_v8i32(<8 x double> %va) {
 }
 
 declare <8 x i32> @llvm.experimental.constrained.fptoui.v8i32.v8f64(<8 x double>, metadata)
-define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) {
+define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f64_v8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1781,7 +1781,7 @@ define <8 x i32> @vfptoui_v8f64_v8i32(<8 x double> %va) {
 }
 
 declare <8 x i64> @llvm.experimental.constrained.fptosi.v8i64.v8f64(<8 x double>, metadata)
-define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) {
+define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_v8f64_v8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -1792,7 +1792,7 @@ define <8 x i64> @vfptosi_v8f64_v8i64(<8 x double> %va) {
 }
 
 declare <8 x i64> @llvm.experimental.constrained.fptoui.v8i64.v8f64(<8 x double>, metadata)
-define <8 x i64> @vfptoui_v8f64_v8i64(<8 x double> %va) {
+define <8 x i64> @vfptoui_v8f64_v8i64(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_v8f64_v8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

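The one-line change repeated throughout these hunks reflects the rule that a function whose body calls a constrained floating-point intrinsic must itself carry the strictfp attribute. A minimal sketch of the corrected shape — the function name and types here are illustrative, not taken from the patch (note that constrained fptoui/fptosi take only the exception-behavior metadata, with no rounding-mode operand):

declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float>, metadata)

; strictfp on the define is what the rule requires; without it, the
; constrained call below makes the IR ill-formed.
define <2 x i32> @example_fptoui(<2 x float> %va) strictfp {
  %r = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f32(<2 x float> %va, metadata !"fpexcept.strict")
  ret <2 x i32> %r
}
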
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll
index d0e56febcdaa28..32a050800b9792 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptrunc-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
-define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) {
+define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v2f64_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -17,7 +17,7 @@ define <2 x float> @vfptrunc_v2f64_v2f32(<2 x double> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f64(<2 x double>, metadata, metadata)
-define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) {
+define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v2f64_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -30,7 +30,7 @@ define <2 x half> @vfptrunc_v2f64_v2f16(<2 x double> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.fptrunc.v2f16.v2f32(<2 x float>, metadata, metadata)
-define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) {
+define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v2f32_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -42,7 +42,7 @@ define <2 x half> @vfptrunc_v2f32_v2f16(<2 x float> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.fptrunc.v4f32.v4f64(<4 x double>, metadata, metadata)
-define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) {
+define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v4f64_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -54,7 +54,7 @@ define <4 x float> @vfptrunc_v4f64_v4f32(<4 x double> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f64(<4 x double>, metadata, metadata)
-define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) {
+define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v4f64_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -67,7 +67,7 @@ define <4 x half> @vfptrunc_v4f64_v4f16(<4 x double> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.fptrunc.v4f16.v4f32(<4 x float>, metadata, metadata)
-define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) {
+define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v4f32_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -79,7 +79,7 @@ define <4 x half> @vfptrunc_v4f32_v4f16(<4 x float> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.fptrunc.v8f32.v8f64(<8 x double>, metadata, metadata)
-define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) {
+define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v8f64_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -91,7 +91,7 @@ define <8 x float> @vfptrunc_v8f64_v8f32(<8 x double> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f64(<8 x double>, metadata, metadata)
-define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) {
+define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v8f64_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -104,7 +104,7 @@ define <8 x half> @vfptrunc_v8f64_v8f16(<8 x double> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.fptrunc.v8f16.v8f32(<8 x float>, metadata, metadata)
-define <8 x half> @vfptrunc_v8f32_v8f16(<8 x float> %va) {
+define <8 x half> @vfptrunc_v8f32_v8f16(<8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_v8f32_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
index 8f4a4adf3df771..9f29d14050de78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-constrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 declare <2 x half> @llvm.experimental.constrained.sqrt.v2f16(<2 x half>, metadata, metadata)
 
-define <2 x half> @vfsqrt_v2f16(<2 x half> %v) {
+define <2 x half> @vfsqrt_v2f16(<2 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -18,7 +18,7 @@ define <2 x half> @vfsqrt_v2f16(<2 x half> %v) {
 
 declare <4 x half> @llvm.experimental.constrained.sqrt.v4f16(<4 x half>, metadata, metadata)
 
-define <4 x half> @vfsqrt_v4f16(<4 x half> %v) {
+define <4 x half> @vfsqrt_v4f16(<4 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -30,7 +30,7 @@ define <4 x half> @vfsqrt_v4f16(<4 x half> %v) {
 
 declare <8 x half> @llvm.experimental.constrained.sqrt.v8f16(<8 x half>, metadata, metadata)
 
-define <8 x half> @vfsqrt_v8f16(<8 x half> %v) {
+define <8 x half> @vfsqrt_v8f16(<8 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -42,7 +42,7 @@ define <8 x half> @vfsqrt_v8f16(<8 x half> %v) {
 
 declare <16 x half> @llvm.experimental.constrained.sqrt.v16f16(<16 x half>, metadata, metadata)
 
-define <16 x half> @vfsqrt_v16f16(<16 x half> %v) {
+define <16 x half> @vfsqrt_v16f16(<16 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -54,7 +54,7 @@ define <16 x half> @vfsqrt_v16f16(<16 x half> %v) {
 
 declare <32 x half> @llvm.experimental.constrained.sqrt.v32f16(<32 x half>, metadata, metadata)
 
-define <32 x half> @vfsqrt_v32f16(<32 x half> %v) {
+define <32 x half> @vfsqrt_v32f16(<32 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -67,7 +67,7 @@ define <32 x half> @vfsqrt_v32f16(<32 x half> %v) {
 
 declare <2 x float> @llvm.experimental.constrained.sqrt.v2f32(<2 x float>, metadata, metadata)
 
-define <2 x float> @vfsqrt_v2f32(<2 x float> %v) {
+define <2 x float> @vfsqrt_v2f32(<2 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -79,7 +79,7 @@ define <2 x float> @vfsqrt_v2f32(<2 x float> %v) {
 
 declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
 
-define <4 x float> @vfsqrt_v4f32(<4 x float> %v) {
+define <4 x float> @vfsqrt_v4f32(<4 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -91,7 +91,7 @@ define <4 x float> @vfsqrt_v4f32(<4 x float> %v) {
 
 declare <8 x float> @llvm.experimental.constrained.sqrt.v8f32(<8 x float>, metadata, metadata)
 
-define <8 x float> @vfsqrt_v8f32(<8 x float> %v) {
+define <8 x float> @vfsqrt_v8f32(<8 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -103,7 +103,7 @@ define <8 x float> @vfsqrt_v8f32(<8 x float> %v) {
 
 declare <16 x float> @llvm.experimental.constrained.sqrt.v16f32(<16 x float>, metadata, metadata)
 
-define <16 x float> @vfsqrt_v16f32(<16 x float> %v) {
+define <16 x float> @vfsqrt_v16f32(<16 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -115,7 +115,7 @@ define <16 x float> @vfsqrt_v16f32(<16 x float> %v) {
 
 declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
 
-define <2 x double> @vfsqrt_v2f64(<2 x double> %v) {
+define <2 x double> @vfsqrt_v2f64(<2 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -127,7 +127,7 @@ define <2 x double> @vfsqrt_v2f64(<2 x double> %v) {
 
 declare <4 x double> @llvm.experimental.constrained.sqrt.v4f64(<4 x double>, metadata, metadata)
 
-define <4 x double> @vfsqrt_v4f64(<4 x double> %v) {
+define <4 x double> @vfsqrt_v4f64(<4 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -139,7 +139,7 @@ define <4 x double> @vfsqrt_v4f64(<4 x double> %v) {
 
 declare <8 x double> @llvm.experimental.constrained.sqrt.v8f64(<8 x double>, metadata, metadata)
 
-define <8 x double> @vfsqrt_v8f64(<8 x double> %v) {
+define <8 x double> @vfsqrt_v8f64(<8 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
index e7960d4f018d4f..f9d40d7a117b5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <2 x half> @llvm.experimental.constrained.fsub.v2f16(<2 x half>, <2 x half>, metadata, metadata)
-define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %vb) {
+define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <2 x half> %vc
 }
 
-define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) {
+define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.fsub.v4f16(<4 x half>, <4 x half>, metadata, metadata)
-define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %vb) {
+define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <4 x half> %vc
 }
 
-define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) {
+define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.fsub.v8f16(<8 x half>, <8 x half>, metadata, metadata)
-define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %vb) {
+define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <8 x half> %vc
 }
 
-define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b) {
+define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -76,7 +76,7 @@ define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b) {
   ret <8 x half> %vc
 }
 
-define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) {
+define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_fv_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -89,7 +89,7 @@ define <8 x half> @vfsub_fv_v8f16(<8 x half> %va, half %b) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.fsub.v16f16(<16 x half>, <16 x half>, metadata, metadata)
-define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %vb) {
+define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -100,7 +100,7 @@ entry:
   ret <16 x half> %vc
 }
 
-define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) {
+define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.fsub.v32f16(<32 x half>, <32 x half>, metadata, metadata)
-define <32 x half> @vfsub_vv_v32f16(<32 x half> %va, <32 x half> %vb) {
+define <32 x half> @vfsub_vv_v32f16(<32 x half> %va, <32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a0, 32
@@ -125,7 +125,7 @@ entry:
   ret <32 x half> %vc
 }
 
-define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) {
+define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -139,7 +139,7 @@ define <32 x half> @vfsub_vf_v32f16(<32 x half> %va, half %b) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float>, <2 x float>, metadata, metadata)
-define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %vb) {
+define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -150,7 +150,7 @@ entry:
   ret <2 x float> %vc
 }
 
-define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) {
+define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -163,7 +163,7 @@ define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
-define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %vb) {
+define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -174,7 +174,7 @@ entry:
   ret <4 x float> %vc
 }
 
-define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) {
+define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -187,7 +187,7 @@ define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata)
-define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %vb) {
+define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -198,7 +198,7 @@ entry:
   ret <8 x float> %vc
 }
 
-define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b) {
+define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -210,7 +210,7 @@ define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b) {
   ret <8 x float> %vc
 }
 
-define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) {
+define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_fv_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -223,7 +223,7 @@ define <8 x float> @vfsub_fv_v8f32(<8 x float> %va, float %b) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata)
-define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %vb) {
+define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -234,7 +234,7 @@ entry:
   ret <16 x float> %vc
 }
 
-define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) {
+define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -247,7 +247,7 @@ define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
-define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %vb) {
+define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -258,7 +258,7 @@ entry:
   ret <2 x double> %vc
 }
 
-define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) {
+define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -271,7 +271,7 @@ define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
-define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %vb) {
+define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -282,7 +282,7 @@ entry:
   ret <4 x double> %vc
 }
 
-define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) {
+define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -295,7 +295,7 @@ define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata)
-define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %vb) {
+define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_v8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -306,7 +306,7 @@ entry:
   ret <8 x double> %vc
 }
 
-define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b) {
+define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -318,7 +318,7 @@ define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b) {
   ret <8 x double> %vc
 }
 
-define <8 x double> @vfsub_fv_v8f64(<8 x double> %va, double %b) {
+define <8 x double> @vfsub_fv_v8f64(<8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_fv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

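The vfsub tests above exercise the two-metadata form of the constrained intrinsics, which carry an explicit rounding-mode argument in addition to the exception behavior. A hypothetical minimal case under the same strictfp rule (again, the name and types are illustrative only):

declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)

define <2 x double> @example_fsub(<2 x double> %va, <2 x double> %vb) strictfp {
  ; round.dynamic defers to the runtime rounding mode; fpexcept.strict
  ; preserves floating-point exception side effects.
  %vc = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %va, <2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x double> %vc
}
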
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
index b2108474111dfe..a1e839091ca3d7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vitofp-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
-define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) {
+define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i1_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -18,7 +18,7 @@ define <1 x half> @vsitofp_v1i1_v1f16(<1 x i1> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i1(<1 x i1>, metadata, metadata)
-define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) {
+define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i1_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -31,7 +31,7 @@ define <1 x half> @vuitofp_v1i1_v1f16(<1 x i1> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
-define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) {
+define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i1_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -44,7 +44,7 @@ define <1 x float> @vsitofp_v1i1_v1f32(<1 x i1> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i1(<1 x i1>, metadata, metadata)
-define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) {
+define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i1_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -57,7 +57,7 @@ define <1 x float> @vuitofp_v1i1_v1f32(<1 x i1> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
-define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) {
+define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i1_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -70,7 +70,7 @@ define <1 x double> @vsitofp_v1i1_v1f64(<1 x i1> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i1(<1 x i1>, metadata, metadata)
-define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) {
+define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i1_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -83,7 +83,7 @@ define <1 x double> @vuitofp_v1i1_v1f64(<1 x i1> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
-define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) {
+define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i1_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -96,7 +96,7 @@ define <2 x half> @vsitofp_v2i1_v2f16(<2 x i1> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i1(<2 x i1>, metadata, metadata)
-define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) {
+define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i1_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -109,7 +109,7 @@ define <2 x half> @vuitofp_v2i1_v2f16(<2 x i1> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
-define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) {
+define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i1_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -122,7 +122,7 @@ define <2 x float> @vsitofp_v2i1_v2f32(<2 x i1> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i1(<2 x i1>, metadata, metadata)
-define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) {
+define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i1_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -135,7 +135,7 @@ define <2 x float> @vuitofp_v2i1_v2f32(<2 x i1> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
-define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) {
+define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i1_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -148,7 +148,7 @@ define <2 x double> @vsitofp_v2i1_v2f64(<2 x i1> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i1(<2 x i1>, metadata, metadata)
-define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) {
+define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i1_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -161,7 +161,7 @@ define <2 x double> @vuitofp_v2i1_v2f64(<2 x i1> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
-define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) {
+define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i1_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -174,7 +174,7 @@ define <4 x half> @vsitofp_v4i1_v4f16(<4 x i1> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i1(<4 x i1>, metadata, metadata)
-define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) {
+define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i1_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -187,7 +187,7 @@ define <4 x half> @vuitofp_v4i1_v4f16(<4 x i1> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
-define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) {
+define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i1_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -200,7 +200,7 @@ define <4 x float> @vsitofp_v4i1_v4f32(<4 x i1> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i1(<4 x i1>, metadata, metadata)
-define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) {
+define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i1_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -213,7 +213,7 @@ define <4 x float> @vuitofp_v4i1_v4f32(<4 x i1> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
-define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) {
+define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i1_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -226,7 +226,7 @@ define <4 x double> @vsitofp_v4i1_v4f64(<4 x i1> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i1(<4 x i1>, metadata, metadata)
-define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) {
+define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i1_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -239,7 +239,7 @@ define <4 x double> @vuitofp_v4i1_v4f64(<4 x i1> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
-define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) {
+define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i1_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -252,7 +252,7 @@ define <8 x half> @vsitofp_v8i1_v8f16(<8 x i1> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i1(<8 x i1>, metadata, metadata)
-define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) {
+define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i1_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -265,7 +265,7 @@ define <8 x half> @vuitofp_v8i1_v8f16(<8 x i1> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
-define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) {
+define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i1_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -278,7 +278,7 @@ define <8 x float> @vsitofp_v8i1_v8f32(<8 x i1> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i1(<8 x i1>, metadata, metadata)
-define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) {
+define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i1_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -291,7 +291,7 @@ define <8 x float> @vuitofp_v8i1_v8f32(<8 x i1> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
-define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) {
+define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i1_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -304,7 +304,7 @@ define <8 x double> @vsitofp_v8i1_v8f64(<8 x i1> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i1(<8 x i1>, metadata, metadata)
-define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) {
+define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i1_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -317,7 +317,7 @@ define <8 x double> @vuitofp_v8i1_v8f64(<8 x i1> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
-define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) {
+define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i1_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -330,7 +330,7 @@ define <16 x half> @vsitofp_v16i1_v16f16(<16 x i1> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i1(<16 x i1>, metadata, metadata)
-define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) {
+define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i1_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -343,7 +343,7 @@ define <16 x half> @vuitofp_v16i1_v16f16(<16 x i1> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
-define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) {
+define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i1_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -356,7 +356,7 @@ define <16 x float> @vsitofp_v16i1_v16f32(<16 x i1> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i1(<16 x i1>, metadata, metadata)
-define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) {
+define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i1_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -369,7 +369,7 @@ define <16 x float> @vuitofp_v16i1_v16f32(<16 x i1> %va) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
-define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) {
+define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v32i1_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -383,7 +383,7 @@ define <32 x half> @vsitofp_v32i1_v32f16(<32 x i1> %va) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i1(<32 x i1>, metadata, metadata)
-define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) {
+define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v32i1_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -397,7 +397,7 @@ define <32 x half> @vuitofp_v32i1_v32f16(<32 x i1> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
-define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) {
+define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i8_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -409,13 +409,13 @@ define <1 x half> @vsitofp_v1i8_v1f16(<1 x i8> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
-define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) {
+define <1 x half> @vsitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
   %evec = call <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i7(<1 x i7> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
   ret <1 x half> %evec
 }
 
 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i7(<1 x i7>, metadata, metadata)
-define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) {
+define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i7_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    addi sp, sp, -16
@@ -433,7 +433,7 @@ define <1 x half> @vuitofp_v1i7_v1f16(<1 x i7> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i8(<1 x i8>, metadata, metadata)
-define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) {
+define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i8_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
@@ -445,7 +445,7 @@ define <1 x half> @vuitofp_v1i8_v1f16(<1 x i8> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
-define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) {
+define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i8_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -457,7 +457,7 @@ define <1 x float> @vsitofp_v1i8_v1f32(<1 x i8> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i8(<1 x i8>, metadata, metadata)
-define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) {
+define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i8_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -469,7 +469,7 @@ define <1 x float> @vuitofp_v1i8_v1f32(<1 x i8> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
-define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) {
+define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i8_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -481,7 +481,7 @@ define <1 x double> @vsitofp_v1i8_v1f64(<1 x i8> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i8(<1 x i8>, metadata, metadata)
-define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) {
+define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i8_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -493,7 +493,7 @@ define <1 x double> @vuitofp_v1i8_v1f64(<1 x i8> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
-define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) {
+define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i8_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -505,7 +505,7 @@ define <2 x half> @vsitofp_v2i8_v2f16(<2 x i8> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)
-define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) {
+define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i8_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
@@ -517,7 +517,7 @@ define <2 x half> @vuitofp_v2i8_v2f16(<2 x i8> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
-define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) {
+define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i8_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -529,7 +529,7 @@ define <2 x float> @vsitofp_v2i8_v2f32(<2 x i8> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i8(<2 x i8>, metadata, metadata)
-define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) {
+define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i8_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -541,7 +541,7 @@ define <2 x float> @vuitofp_v2i8_v2f32(<2 x i8> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
-define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) {
+define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i8_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -553,7 +553,7 @@ define <2 x double> @vsitofp_v2i8_v2f64(<2 x i8> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i8(<2 x i8>, metadata, metadata)
-define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) {
+define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i8_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -565,7 +565,7 @@ define <2 x double> @vuitofp_v2i8_v2f64(<2 x i8> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
-define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) {
+define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i8_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -577,7 +577,7 @@ define <4 x half> @vsitofp_v4i8_v4f16(<4 x i8> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i8(<4 x i8>, metadata, metadata)
-define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) {
+define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i8_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
@@ -589,7 +589,7 @@ define <4 x half> @vuitofp_v4i8_v4f16(<4 x i8> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
-define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) {
+define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i8_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -601,7 +601,7 @@ define <4 x float> @vsitofp_v4i8_v4f32(<4 x i8> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i8(<4 x i8>, metadata, metadata)
-define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) {
+define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i8_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -613,7 +613,7 @@ define <4 x float> @vuitofp_v4i8_v4f32(<4 x i8> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
-define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) {
+define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i8_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -625,7 +625,7 @@ define <4 x double> @vsitofp_v4i8_v4f64(<4 x i8> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i8(<4 x i8>, metadata, metadata)
-define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) {
+define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i8_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -637,7 +637,7 @@ define <4 x double> @vuitofp_v4i8_v4f64(<4 x i8> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
-define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) {
+define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i8_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -649,7 +649,7 @@ define <8 x half> @vsitofp_v8i8_v8f16(<8 x i8> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i8(<8 x i8>, metadata, metadata)
-define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) {
+define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i8_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
@@ -661,7 +661,7 @@ define <8 x half> @vuitofp_v8i8_v8f16(<8 x i8> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
-define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) {
+define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i8_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -673,7 +673,7 @@ define <8 x float> @vsitofp_v8i8_v8f32(<8 x i8> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i8(<8 x i8>, metadata, metadata)
-define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) {
+define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i8_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -685,7 +685,7 @@ define <8 x float> @vuitofp_v8i8_v8f32(<8 x i8> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
-define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) {
+define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i8_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -697,7 +697,7 @@ define <8 x double> @vsitofp_v8i8_v8f64(<8 x i8> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i8(<8 x i8>, metadata, metadata)
-define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) {
+define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i8_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -709,7 +709,7 @@ define <8 x double> @vuitofp_v8i8_v8f64(<8 x i8> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
-define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) {
+define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i8_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -721,7 +721,7 @@ define <16 x half> @vsitofp_v16i8_v16f16(<16 x i8> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i8(<16 x i8>, metadata, metadata)
-define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) {
+define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i8_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
@@ -733,7 +733,7 @@ define <16 x half> @vuitofp_v16i8_v16f16(<16 x i8> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
-define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) {
+define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i8_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -745,7 +745,7 @@ define <16 x float> @vsitofp_v16i8_v16f32(<16 x i8> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i8(<16 x i8>, metadata, metadata)
-define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) {
+define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i8_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -757,7 +757,7 @@ define <16 x float> @vuitofp_v16i8_v16f32(<16 x i8> %va) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
-define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) {
+define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v32i8_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -770,7 +770,7 @@ define <32 x half> @vsitofp_v32i8_v32f16(<32 x i8> %va) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i8(<32 x i8>, metadata, metadata)
-define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) {
+define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v32i8_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -783,7 +783,7 @@ define <32 x half> @vuitofp_v32i8_v32f16(<32 x i8> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
-define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) {
+define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i16_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -794,7 +794,7 @@ define <1 x half> @vsitofp_v1i16_v1f16(<1 x i16> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i16(<1 x i16>, metadata, metadata)
-define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) {
+define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i16_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -805,7 +805,7 @@ define <1 x half> @vuitofp_v1i16_v1f16(<1 x i16> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
-define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) {
+define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i16_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -817,7 +817,7 @@ define <1 x float> @vsitofp_v1i16_v1f32(<1 x i16> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i16(<1 x i16>, metadata, metadata)
-define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) {
+define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i16_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -829,7 +829,7 @@ define <1 x float> @vuitofp_v1i16_v1f32(<1 x i16> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
-define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) {
+define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i16_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -841,7 +841,7 @@ define <1 x double> @vsitofp_v1i16_v1f64(<1 x i16> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i16(<1 x i16>, metadata, metadata)
-define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) {
+define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i16_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -853,7 +853,7 @@ define <1 x double> @vuitofp_v1i16_v1f64(<1 x i16> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
-define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) {
+define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i16_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -864,7 +864,7 @@ define <2 x half> @vsitofp_v2i16_v2f16(<2 x i16> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i16(<2 x i16>, metadata, metadata)
-define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) {
+define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i16_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -875,7 +875,7 @@ define <2 x half> @vuitofp_v2i16_v2f16(<2 x i16> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
-define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) {
+define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i16_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -887,7 +887,7 @@ define <2 x float> @vsitofp_v2i16_v2f32(<2 x i16> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i16(<2 x i16>, metadata, metadata)
-define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) {
+define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i16_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -899,7 +899,7 @@ define <2 x float> @vuitofp_v2i16_v2f32(<2 x i16> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
-define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) {
+define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -911,7 +911,7 @@ define <2 x double> @vsitofp_v2i16_v2f64(<2 x i16> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i16(<2 x i16>, metadata, metadata)
-define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) {
+define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i16_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -923,7 +923,7 @@ define <2 x double> @vuitofp_v2i16_v2f64(<2 x i16> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
-define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) {
+define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i16_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -934,7 +934,7 @@ define <4 x half> @vsitofp_v4i16_v4f16(<4 x i16> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i16(<4 x i16>, metadata, metadata)
-define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) {
+define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i16_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -945,7 +945,7 @@ define <4 x half> @vuitofp_v4i16_v4f16(<4 x i16> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
-define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) {
+define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i16_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -957,7 +957,7 @@ define <4 x float> @vsitofp_v4i16_v4f32(<4 x i16> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i16(<4 x i16>, metadata, metadata)
-define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) {
+define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i16_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -969,7 +969,7 @@ define <4 x float> @vuitofp_v4i16_v4f32(<4 x i16> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
-define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) {
+define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i16_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -981,7 +981,7 @@ define <4 x double> @vsitofp_v4i16_v4f64(<4 x i16> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i16(<4 x i16>, metadata, metadata)
-define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) {
+define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i16_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -993,7 +993,7 @@ define <4 x double> @vuitofp_v4i16_v4f64(<4 x i16> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
-define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) {
+define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i16_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1004,7 +1004,7 @@ define <8 x half> @vsitofp_v8i16_v8f16(<8 x i16> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i16(<8 x i16>, metadata, metadata)
-define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) {
+define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i16_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1015,7 +1015,7 @@ define <8 x half> @vuitofp_v8i16_v8f16(<8 x i16> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
-define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) {
+define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i16_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1027,7 +1027,7 @@ define <8 x float> @vsitofp_v8i16_v8f32(<8 x i16> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i16(<8 x i16>, metadata, metadata)
-define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) {
+define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i16_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1039,7 +1039,7 @@ define <8 x float> @vuitofp_v8i16_v8f32(<8 x i16> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
-define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) {
+define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i16_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1051,7 +1051,7 @@ define <8 x double> @vsitofp_v8i16_v8f64(<8 x i16> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i16(<8 x i16>, metadata, metadata)
-define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) {
+define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i16_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1063,7 +1063,7 @@ define <8 x double> @vuitofp_v8i16_v8f64(<8 x i16> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
-define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) {
+define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i16_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1074,7 +1074,7 @@ define <16 x half> @vsitofp_v16i16_v16f16(<16 x i16> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i16(<16 x i16>, metadata, metadata)
-define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) {
+define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i16_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1085,7 +1085,7 @@ define <16 x half> @vuitofp_v16i16_v16f16(<16 x i16> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
-define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) {
+define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i16_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1097,7 +1097,7 @@ define <16 x float> @vsitofp_v16i16_v16f32(<16 x i16> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i16(<16 x i16>, metadata, metadata)
-define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) {
+define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i16_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1109,7 +1109,7 @@ define <16 x float> @vuitofp_v16i16_v16f32(<16 x i16> %va) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.sitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
-define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) {
+define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v32i16_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -1121,7 +1121,7 @@ define <32 x half> @vsitofp_v32i16_v32f16(<32 x i16> %va) {
 }
 
 declare <32 x half> @llvm.experimental.constrained.uitofp.v32f16.v32i16(<32 x i16>, metadata, metadata)
-define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) {
+define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v32i16_v32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 32
@@ -1133,7 +1133,7 @@ define <32 x half> @vuitofp_v32i16_v32f16(<32 x i16> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
-define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) {
+define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i32_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -1145,7 +1145,7 @@ define <1 x half> @vsitofp_v1i32_v1f16(<1 x i32> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i32(<1 x i32>, metadata, metadata)
-define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) {
+define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i32_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
@@ -1157,7 +1157,7 @@ define <1 x half> @vuitofp_v1i32_v1f16(<1 x i32> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
-define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) {
+define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i32_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1168,7 +1168,7 @@ define <1 x float> @vsitofp_v1i32_v1f32(<1 x i32> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i32(<1 x i32>, metadata, metadata)
-define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) {
+define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i32_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1179,7 +1179,7 @@ define <1 x float> @vuitofp_v1i32_v1f32(<1 x i32> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
-define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) {
+define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i32_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1191,7 +1191,7 @@ define <1 x double> @vsitofp_v1i32_v1f64(<1 x i32> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
-define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) {
+define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i32_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1203,7 +1203,7 @@ define <1 x double> @vuitofp_v1i32_v1f64(<1 x i32> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
-define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) {
+define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i32_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -1215,7 +1215,7 @@ define <2 x half> @vsitofp_v2i32_v2f16(<2 x i32> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i32(<2 x i32>, metadata, metadata)
-define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) {
+define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i32_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
@@ -1227,7 +1227,7 @@ define <2 x half> @vuitofp_v2i32_v2f16(<2 x i32> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
-define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) {
+define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i32_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1238,7 +1238,7 @@ define <2 x float> @vsitofp_v2i32_v2f32(<2 x i32> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i32(<2 x i32>, metadata, metadata)
-define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) {
+define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i32_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1249,7 +1249,7 @@ define <2 x float> @vuitofp_v2i32_v2f32(<2 x i32> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
-define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) {
+define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i32_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1261,7 +1261,7 @@ define <2 x double> @vsitofp_v2i32_v2f64(<2 x i32> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
-define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) {
+define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i32_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1273,7 +1273,7 @@ define <2 x double> @vuitofp_v2i32_v2f64(<2 x i32> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
-define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) {
+define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i32_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -1285,7 +1285,7 @@ define <4 x half> @vsitofp_v4i32_v4f16(<4 x i32> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i32(<4 x i32>, metadata, metadata)
-define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) {
+define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i32_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
@@ -1297,7 +1297,7 @@ define <4 x half> @vuitofp_v4i32_v4f16(<4 x i32> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
-define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) {
+define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i32_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1308,7 +1308,7 @@ define <4 x float> @vsitofp_v4i32_v4f32(<4 x i32> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
-define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) {
+define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i32_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1319,7 +1319,7 @@ define <4 x float> @vuitofp_v4i32_v4f32(<4 x i32> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
-define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) {
+define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i32_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1331,7 +1331,7 @@ define <4 x double> @vsitofp_v4i32_v4f64(<4 x i32> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i32(<4 x i32>, metadata, metadata)
-define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) {
+define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i32_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1343,7 +1343,7 @@ define <4 x double> @vuitofp_v4i32_v4f64(<4 x i32> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
-define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) {
+define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i32_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1355,7 +1355,7 @@ define <8 x half> @vsitofp_v8i32_v8f16(<8 x i32> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32>, metadata, metadata)
-define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) {
+define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i32_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -1367,7 +1367,7 @@ define <8 x half> @vuitofp_v8i32_v8f16(<8 x i32> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
-define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) {
+define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i32_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1378,7 +1378,7 @@ define <8 x float> @vsitofp_v8i32_v8f32(<8 x i32> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i32(<8 x i32>, metadata, metadata)
-define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) {
+define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i32_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1389,7 +1389,7 @@ define <8 x float> @vuitofp_v8i32_v8f32(<8 x i32> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
-define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) {
+define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i32_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1401,7 +1401,7 @@ define <8 x double> @vsitofp_v8i32_v8f64(<8 x i32> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i32(<8 x i32>, metadata, metadata)
-define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) {
+define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i32_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1413,7 +1413,7 @@ define <8 x double> @vuitofp_v8i32_v8f64(<8 x i32> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.sitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
-define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) {
+define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i32_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1425,7 +1425,7 @@ define <16 x half> @vsitofp_v16i32_v16f16(<16 x i32> %va) {
 }
 
 declare <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32>, metadata, metadata)
-define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) {
+define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i32_v16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
@@ -1437,7 +1437,7 @@ define <16 x half> @vuitofp_v16i32_v16f16(<16 x i32> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.sitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
-define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) {
+define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v16i32_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1448,7 +1448,7 @@ define <16 x float> @vsitofp_v16i32_v16f32(<16 x i32> %va) {
 }
 
 declare <16 x float> @llvm.experimental.constrained.uitofp.v16f32.v16i32(<16 x i32>, metadata, metadata)
-define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) {
+define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v16i32_v16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
@@ -1459,7 +1459,7 @@ define <16 x float> @vuitofp_v16i32_v16f32(<16 x i32> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.sitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
-define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) {
+define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i64_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1472,7 +1472,7 @@ define <1 x half> @vsitofp_v1i64_v1f16(<1 x i64> %va) {
 }
 
 declare <1 x half> @llvm.experimental.constrained.uitofp.v1f16.v1i64(<1 x i64>, metadata, metadata)
-define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) {
+define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i64_v1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1485,7 +1485,7 @@ define <1 x half> @vuitofp_v1i64_v1f16(<1 x i64> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.sitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
-define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) {
+define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i64_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1497,7 +1497,7 @@ define <1 x float> @vsitofp_v1i64_v1f32(<1 x i64> %va) {
 }
 
 declare <1 x float> @llvm.experimental.constrained.uitofp.v1f32.v1i64(<1 x i64>, metadata, metadata)
-define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) {
+define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i64_v1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
@@ -1509,7 +1509,7 @@ define <1 x float> @vuitofp_v1i64_v1f32(<1 x i64> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
-define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) {
+define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v1i64_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1520,7 +1520,7 @@ define <1 x double> @vsitofp_v1i64_v1f64(<1 x i64> %va) {
 }
 
 declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
-define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) {
+define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v1i64_v1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
@@ -1532,7 +1532,7 @@ define <1 x double> @vuitofp_v1i64_v1f64(<1 x i64> %va) {
 
 
 declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
-define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) {
+define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i64_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1545,7 +1545,7 @@ define <2 x half> @vsitofp_v2i64_v2f16(<2 x i64> %va) {
 }
 
 declare <2 x half> @llvm.experimental.constrained.uitofp.v2f16.v2i64(<2 x i64>, metadata, metadata)
-define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) {
+define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i64_v2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1558,7 +1558,7 @@ define <2 x half> @vuitofp_v2i64_v2f16(<2 x i64> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.sitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
-define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) {
+define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i64_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1570,7 +1570,7 @@ define <2 x float> @vsitofp_v2i64_v2f32(<2 x i64> %va) {
 }
 
 declare <2 x float> @llvm.experimental.constrained.uitofp.v2f32.v2i64(<2 x i64>, metadata, metadata)
-define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) {
+define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i64_v2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
@@ -1582,7 +1582,7 @@ define <2 x float> @vuitofp_v2i64_v2f32(<2 x i64> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
-define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) {
+define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v2i64_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -1593,7 +1593,7 @@ define <2 x double> @vsitofp_v2i64_v2f64(<2 x i64> %va) {
 }
 
 declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
-define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) {
+define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v2i64_v2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
@@ -1604,7 +1604,7 @@ define <2 x double> @vuitofp_v2i64_v2f64(<2 x i64> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.sitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
-define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) {
+define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i64_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1617,7 +1617,7 @@ define <4 x half> @vsitofp_v4i64_v4f16(<4 x i64> %va) {
 }
 
 declare <4 x half> @llvm.experimental.constrained.uitofp.v4f16.v4i64(<4 x i64>, metadata, metadata)
-define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) {
+define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i64_v4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1630,7 +1630,7 @@ define <4 x half> @vuitofp_v4i64_v4f16(<4 x i64> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
-define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) {
+define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i64_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1642,7 +1642,7 @@ define <4 x float> @vsitofp_v4i64_v4f32(<4 x i64> %va) {
 }
 
 declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
-define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) {
+define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i64_v4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
@@ -1654,7 +1654,7 @@ define <4 x float> @vuitofp_v4i64_v4f32(<4 x i64> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.sitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
-define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) {
+define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v4i64_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -1665,7 +1665,7 @@ define <4 x double> @vsitofp_v4i64_v4f64(<4 x i64> %va) {
 }
 
 declare <4 x double> @llvm.experimental.constrained.uitofp.v4f64.v4i64(<4 x i64>, metadata, metadata)
-define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) {
+define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v4i64_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
@@ -1676,7 +1676,7 @@ define <4 x double> @vuitofp_v4i64_v4f64(<4 x i64> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.sitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
-define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) {
+define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i64_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1689,7 +1689,7 @@ define <8 x half> @vsitofp_v8i64_v8f16(<8 x i64> %va) {
 }
 
 declare <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i64(<8 x i64>, metadata, metadata)
-define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) {
+define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i64_v8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1702,7 +1702,7 @@ define <8 x half> @vuitofp_v8i64_v8f16(<8 x i64> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.sitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
-define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) {
+define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i64_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1714,7 +1714,7 @@ define <8 x float> @vsitofp_v8i64_v8f32(<8 x i64> %va) {
 }
 
 declare <8 x float> @llvm.experimental.constrained.uitofp.v8f32.v8i64(<8 x i64>, metadata, metadata)
-define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) {
+define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i64_v8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
@@ -1726,7 +1726,7 @@ define <8 x float> @vuitofp_v8i64_v8f32(<8 x i64> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.sitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
-define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) {
+define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_v8i64_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
@@ -1737,7 +1737,7 @@ define <8 x double> @vsitofp_v8i64_v8f64(<8 x i64> %va) {
 }
 
 declare <8 x double> @llvm.experimental.constrained.uitofp.v8f64.v8i64(<8 x i64>, metadata, metadata)
-define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) {
+define <8 x double> @vuitofp_v8i64_v8f64(<8 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_v8i64_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma

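As a reference for the shape these int-to-FP hunks converge on, here is a minimal sketch of a conforming test function. The declare matches those shown above; the function name @example_sitofp and the metadata arguments are illustrative, not copied from the hunks (the call sites are elided by the hunk truncation).

declare <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8>, metadata, metadata)

; Hedged sketch: the strictfp attribute sits on the definition, while the
; constrained intrinsic call supplies rounding-mode and exception-behavior
; metadata. The name and the metadata strings here are illustrative.
define <2 x half> @example_sitofp(<2 x i8> %va) strictfp {
  %r = call <2 x half> @llvm.experimental.constrained.sitofp.v2f16.v2i8(<2 x i8> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <2 x half> %r
}
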
diff --git a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
index 6c15f1738cdc88..f90237b8d7e95d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fnearbyint-constrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.nearbyint.nxv1f16(<vscale x 1 x half>, metadata, metadata)
 
-define <vscale x 1 x half> @nearbyint_nxv1f16(<vscale x 1 x half> %v) {
+define <vscale x 1 x half> @nearbyint_nxv1f16(<vscale x 1 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <vscale x 1 x half> @nearbyint_nxv1f16(<vscale x 1 x half> %v) {
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.nearbyint.nxv2f16(<vscale x 2 x half>, metadata, metadata)
 
-define <vscale x 2 x half> @nearbyint_nxv2f16(<vscale x 2 x half> %v) {
+define <vscale x 2 x half> @nearbyint_nxv2f16(<vscale x 2 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -52,7 +52,7 @@ define <vscale x 2 x half> @nearbyint_nxv2f16(<vscale x 2 x half> %v) {
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.nearbyint.nxv4f16(<vscale x 4 x half>, metadata, metadata)
 
-define <vscale x 4 x half> @nearbyint_nxv4f16(<vscale x 4 x half> %v) {
+define <vscale x 4 x half> @nearbyint_nxv4f16(<vscale x 4 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -75,7 +75,7 @@ define <vscale x 4 x half> @nearbyint_nxv4f16(<vscale x 4 x half> %v) {
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.nearbyint.nxv8f16(<vscale x 8 x half>, metadata, metadata)
 
-define <vscale x 8 x half> @nearbyint_nxv8f16(<vscale x 8 x half> %v) {
+define <vscale x 8 x half> @nearbyint_nxv8f16(<vscale x 8 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -98,7 +98,7 @@ define <vscale x 8 x half> @nearbyint_nxv8f16(<vscale x 8 x half> %v) {
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.nearbyint.nxv16f16(<vscale x 16 x half>, metadata, metadata)
 
-define <vscale x 16 x half> @nearbyint_nxv16f16(<vscale x 16 x half> %v) {
+define <vscale x 16 x half> @nearbyint_nxv16f16(<vscale x 16 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -121,7 +121,7 @@ define <vscale x 16 x half> @nearbyint_nxv16f16(<vscale x 16 x half> %v) {
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.nearbyint.nxv32f16(<vscale x 32 x half>, metadata, metadata)
 
-define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %v) {
+define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -144,7 +144,7 @@ define <vscale x 32 x half> @nearbyint_nxv32f16(<vscale x 32 x half> %v) {
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.nearbyint.nxv1f32(<vscale x 1 x float>, metadata, metadata)
 
-define <vscale x 1 x float> @nearbyint_nxv1f32(<vscale x 1 x float> %v) {
+define <vscale x 1 x float> @nearbyint_nxv1f32(<vscale x 1 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -167,7 +167,7 @@ define <vscale x 1 x float> @nearbyint_nxv1f32(<vscale x 1 x float> %v) {
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.nearbyint.nxv2f32(<vscale x 2 x float>, metadata, metadata)
 
-define <vscale x 2 x float> @nearbyint_nxv2f32(<vscale x 2 x float> %v) {
+define <vscale x 2 x float> @nearbyint_nxv2f32(<vscale x 2 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -190,7 +190,7 @@ define <vscale x 2 x float> @nearbyint_nxv2f32(<vscale x 2 x float> %v) {
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.nearbyint.nxv4f32(<vscale x 4 x float>, metadata, metadata)
 
-define <vscale x 4 x float> @nearbyint_nxv4f32(<vscale x 4 x float> %v) {
+define <vscale x 4 x float> @nearbyint_nxv4f32(<vscale x 4 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -213,7 +213,7 @@ define <vscale x 4 x float> @nearbyint_nxv4f32(<vscale x 4 x float> %v) {
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.nearbyint.nxv8f32(<vscale x 8 x float>, metadata, metadata)
 
-define <vscale x 8 x float> @nearbyint_nxv8f32(<vscale x 8 x float> %v) {
+define <vscale x 8 x float> @nearbyint_nxv8f32(<vscale x 8 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -236,7 +236,7 @@ define <vscale x 8 x float> @nearbyint_nxv8f32(<vscale x 8 x float> %v) {
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.nearbyint.nxv16f32(<vscale x 16 x float>, metadata, metadata)
 
-define <vscale x 16 x float> @nearbyint_nxv16f32(<vscale x 16 x float> %v) {
+define <vscale x 16 x float> @nearbyint_nxv16f32(<vscale x 16 x float> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -259,7 +259,7 @@ define <vscale x 16 x float> @nearbyint_nxv16f32(<vscale x 16 x float> %v) {
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.nearbyint.nxv1f64(<vscale x 1 x double>, metadata, metadata)
 
-define <vscale x 1 x double> @nearbyint_nxv1f64(<vscale x 1 x double> %v) {
+define <vscale x 1 x double> @nearbyint_nxv1f64(<vscale x 1 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -282,7 +282,7 @@ define <vscale x 1 x double> @nearbyint_nxv1f64(<vscale x 1 x double> %v) {
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.nearbyint.nxv2f64(<vscale x 2 x double>, metadata, metadata)
 
-define <vscale x 2 x double> @nearbyint_nxv2f64(<vscale x 2 x double> %v) {
+define <vscale x 2 x double> @nearbyint_nxv2f64(<vscale x 2 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -305,7 +305,7 @@ define <vscale x 2 x double> @nearbyint_nxv2f64(<vscale x 2 x double> %v) {
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.nearbyint.nxv4f64(<vscale x 4 x double>, metadata, metadata)
 
-define <vscale x 4 x double> @nearbyint_nxv4f64(<vscale x 4 x double> %v) {
+define <vscale x 4 x double> @nearbyint_nxv4f64(<vscale x 4 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -328,7 +328,7 @@ define <vscale x 4 x double> @nearbyint_nxv4f64(<vscale x 4 x double> %v) {
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.nearbyint.nxv8f64(<vscale x 8 x double>, metadata, metadata)
 
-define <vscale x 8 x double> @nearbyint_nxv8f64(<vscale x 8 x double> %v) {
+define <vscale x 8 x double> @nearbyint_nxv8f64(<vscale x 8 x double> %v) strictfp {
 ; CHECK-LABEL: nearbyint_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

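The nearbyint hunks follow the same pattern on scalable vector types; a minimal sketch under the same assumptions (the name @example_nearbyint and the metadata values are illustrative, not taken from the truncated hunks):

declare <vscale x 1 x half> @llvm.experimental.constrained.nearbyint.nxv1f16(<vscale x 1 x half>, metadata, metadata)

; Hedged sketch: constrained.nearbyint also takes rounding-mode and
; exception-behavior metadata operands; name and metadata strings are
; illustrative.
define <vscale x 1 x half> @example_nearbyint(<vscale x 1 x half> %v) strictfp {
  %r = call <vscale x 1 x half> @llvm.experimental.constrained.nearbyint.nxv1f16(<vscale x 1 x half> %v, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %r
}
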
diff --git a/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll
index d0eb5836fb4e9d..3276f481f30ea5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fround-costrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 ; This file tests the code generation for `llvm.experimental.constrained.round.*` on scalable vector type.
 
-define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
+define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-LABEL: round_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -28,7 +28,7 @@ define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
 }
 declare <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half>, metadata)
 
-define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
+define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: round_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
 }
 declare <vscale x 2 x half> @llvm.experimental.constrained.round.nxv2f16(<vscale x 2 x half>, metadata)
 
-define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
+define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: round_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -72,7 +72,7 @@ define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
 }
 declare <vscale x 4 x half> @llvm.experimental.constrained.round.nxv4f16(<vscale x 4 x half>, metadata)
 
-define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
+define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: round_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -94,7 +94,7 @@ define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
 }
 declare <vscale x 8 x half> @llvm.experimental.constrained.round.nxv8f16(<vscale x 8 x half>, metadata)
 
-define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
+define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: round_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
 }
 declare <vscale x 16 x half> @llvm.experimental.constrained.round.nxv16f16(<vscale x 16 x half>, metadata)
 
-define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
+define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: round_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -138,7 +138,7 @@ define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
 }
 declare <vscale x 32 x half> @llvm.experimental.constrained.round.nxv32f16(<vscale x 32 x half>, metadata)
 
-define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
+define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: round_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -160,7 +160,7 @@ define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
 }
 declare <vscale x 1 x float> @llvm.experimental.constrained.round.nxv1f32(<vscale x 1 x float>, metadata)
 
-define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
+define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: round_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -182,7 +182,7 @@ define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
 }
 declare <vscale x 2 x float> @llvm.experimental.constrained.round.nxv2f32(<vscale x 2 x float>, metadata)
 
-define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
+define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: round_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -204,7 +204,7 @@ define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
 }
 declare <vscale x 4 x float> @llvm.experimental.constrained.round.nxv4f32(<vscale x 4 x float>, metadata)
 
-define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
+define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: round_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -226,7 +226,7 @@ define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
 }
 declare <vscale x 8 x float> @llvm.experimental.constrained.round.nxv8f32(<vscale x 8 x float>, metadata)
 
-define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
+define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: round_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -248,7 +248,7 @@ define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
 }
 declare <vscale x 16 x float> @llvm.experimental.constrained.round.nxv16f32(<vscale x 16 x float>, metadata)
 
-define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
+define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: round_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -270,7 +270,7 @@ define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
 }
 declare <vscale x 1 x double> @llvm.experimental.constrained.round.nxv1f64(<vscale x 1 x double>, metadata)
 
-define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
+define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: round_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -292,7 +292,7 @@ define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
 }
 declare <vscale x 2 x double> @llvm.experimental.constrained.round.nxv2f64(<vscale x 2 x double>, metadata)
 
-define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
+define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: round_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -314,7 +314,7 @@ define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
 }
 declare <vscale x 4 x double> @llvm.experimental.constrained.round.nxv4f64(<vscale x 4 x double>, metadata)
 
-define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
+define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: round_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

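Unlike the intrinsics above, constrained.round takes only the exception-behavior metadata operand and no rounding-mode operand, as the declares in this file show. A minimal sketch (the name @example_round and the metadata string are illustrative):

declare <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half>, metadata)

; Hedged sketch: a single metadata operand for constrained.round;
; name and metadata string are illustrative.
define <vscale x 1 x half> @example_round(<vscale x 1 x half> %x) strictfp {
  %r = call <vscale x 1 x half> @llvm.experimental.constrained.round.nxv1f16(<vscale x 1 x half> %x, metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %r
}
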
diff --git a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
index 885ccf2d3b4a1a..4ebfcccbaaa6e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/froundeven-constrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 ; This file tests the code generation for `llvm.experimental.constrained.roundeven.*` on scalable vector type.
 
-define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) {
+define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -28,7 +28,7 @@ define <vscale x 1 x half> @roundeven_nxv1f16(<vscale x 1 x half> %x) {
 }
 declare <vscale x 1 x half> @llvm.experimental.constrained.roundeven.nxv1f16(<vscale x 1 x half>, metadata)
 
-define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) {
+define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -50,7 +50,7 @@ define <vscale x 2 x half> @roundeven_nxv2f16(<vscale x 2 x half> %x) {
 }
 declare <vscale x 2 x half> @llvm.experimental.constrained.roundeven.nxv2f16(<vscale x 2 x half>, metadata)
 
-define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) {
+define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -72,7 +72,7 @@ define <vscale x 4 x half> @roundeven_nxv4f16(<vscale x 4 x half> %x) {
 }
 declare <vscale x 4 x half> @llvm.experimental.constrained.roundeven.nxv4f16(<vscale x 4 x half>, metadata)
 
-define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) {
+define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -94,7 +94,7 @@ define <vscale x 8 x half> @roundeven_nxv8f16(<vscale x 8 x half> %x) {
 }
 declare <vscale x 8 x half> @llvm.experimental.constrained.roundeven.nxv8f16(<vscale x 8 x half>, metadata)
 
-define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) {
+define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 16 x half> @roundeven_nxv16f16(<vscale x 16 x half> %x) {
 }
 declare <vscale x 16 x half> @llvm.experimental.constrained.roundeven.nxv16f16(<vscale x 16 x half>, metadata)
 
-define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) {
+define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -138,7 +138,7 @@ define <vscale x 32 x half> @roundeven_nxv32f16(<vscale x 32 x half> %x) {
 }
 declare <vscale x 32 x half> @llvm.experimental.constrained.roundeven.nxv32f16(<vscale x 32 x half>, metadata)
 
-define <vscale x 1 x float> @roundeven_nxv1f32(<vscale x 1 x float> %x) {
+define <vscale x 1 x float> @roundeven_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -160,7 +160,7 @@ define <vscale x 1 x float> @roundeven_nxv1f32(<vscale x 1 x float> %x) {
 }
 declare <vscale x 1 x float> @llvm.experimental.constrained.roundeven.nxv1f32(<vscale x 1 x float>, metadata)
 
-define <vscale x 2 x float> @roundeven_nxv2f32(<vscale x 2 x float> %x) {
+define <vscale x 2 x float> @roundeven_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -182,7 +182,7 @@ define <vscale x 2 x float> @roundeven_nxv2f32(<vscale x 2 x float> %x) {
 }
 declare <vscale x 2 x float> @llvm.experimental.constrained.roundeven.nxv2f32(<vscale x 2 x float>, metadata)
 
-define <vscale x 4 x float> @roundeven_nxv4f32(<vscale x 4 x float> %x) {
+define <vscale x 4 x float> @roundeven_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -204,7 +204,7 @@ define <vscale x 4 x float> @roundeven_nxv4f32(<vscale x 4 x float> %x) {
 }
 declare <vscale x 4 x float> @llvm.experimental.constrained.roundeven.nxv4f32(<vscale x 4 x float>, metadata)
 
-define <vscale x 8 x float> @roundeven_nxv8f32(<vscale x 8 x float> %x) {
+define <vscale x 8 x float> @roundeven_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -226,7 +226,7 @@ define <vscale x 8 x float> @roundeven_nxv8f32(<vscale x 8 x float> %x) {
 }
 declare <vscale x 8 x float> @llvm.experimental.constrained.roundeven.nxv8f32(<vscale x 8 x float>, metadata)
 
-define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) {
+define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -248,7 +248,7 @@ define <vscale x 16 x float> @roundeven_nxv16f32(<vscale x 16 x float> %x) {
 }
 declare <vscale x 16 x float> @llvm.experimental.constrained.roundeven.nxv16f32(<vscale x 16 x float>, metadata)
 
-define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) {
+define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -270,7 +270,7 @@ define <vscale x 1 x double> @roundeven_nxv1f64(<vscale x 1 x double> %x) {
 }
 declare <vscale x 1 x double> @llvm.experimental.constrained.roundeven.nxv1f64(<vscale x 1 x double>, metadata)
 
-define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) {
+define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -292,7 +292,7 @@ define <vscale x 2 x double> @roundeven_nxv2f64(<vscale x 2 x double> %x) {
 }
 declare <vscale x 2 x double> @llvm.experimental.constrained.roundeven.nxv2f64(<vscale x 2 x double>, metadata)
 
-define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) {
+define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -314,7 +314,7 @@ define <vscale x 4 x double> @roundeven_nxv4f64(<vscale x 4 x double> %x) {
 }
 declare <vscale x 4 x double> @llvm.experimental.constrained.roundeven.nxv4f64(<vscale x 4 x double>, metadata)
 
-define <vscale x 8 x double> @roundeven_nxv8f64(<vscale x 8 x double> %x) {
+define <vscale x 8 x double> @roundeven_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: roundeven_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
index 627321f8ba07f5..3665669d83a3d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ftrunc-constrained-sdnode.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
+define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -24,7 +24,7 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
 }
 declare <vscale x 1 x half> @llvm.experimental.constrained.trunc.nxv1f16(<vscale x 1 x half>, metadata)
 
-define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
+define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -44,7 +44,7 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
 }
 declare <vscale x 2 x half> @llvm.experimental.constrained.trunc.nxv2f16(<vscale x 2 x half>, metadata)
 
-define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
+define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -64,7 +64,7 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
 }
 declare <vscale x 4 x half> @llvm.experimental.constrained.trunc.nxv4f16(<vscale x 4 x half>, metadata)
 
-define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
+define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -84,7 +84,7 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
 }
 declare <vscale x 8 x half> @llvm.experimental.constrained.trunc.nxv8f16(<vscale x 8 x half>, metadata)
 
-define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
+define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -104,7 +104,7 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
 }
 declare <vscale x 16 x half> @llvm.experimental.constrained.trunc.nxv16f16(<vscale x 16 x half>, metadata)
 
-define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
+define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -124,7 +124,7 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
 }
 declare <vscale x 32 x half> @llvm.experimental.constrained.trunc.nxv32f16(<vscale x 32 x half>, metadata)
 
-define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
+define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -144,7 +144,7 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
 }
 declare <vscale x 1 x float> @llvm.experimental.constrained.trunc.nxv1f32(<vscale x 1 x float>, metadata)
 
-define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
+define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
 }
 declare <vscale x 2 x float> @llvm.experimental.constrained.trunc.nxv2f32(<vscale x 2 x float>, metadata)
 
-define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
+define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -184,7 +184,7 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
 }
 declare <vscale x 4 x float> @llvm.experimental.constrained.trunc.nxv4f32(<vscale x 4 x float>, metadata)
 
-define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
+define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -204,7 +204,7 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
 }
 declare <vscale x 8 x float> @llvm.experimental.constrained.trunc.nxv8f32(<vscale x 8 x float>, metadata)
 
-define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
+define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -224,7 +224,7 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
 }
 declare <vscale x 16 x float> @llvm.experimental.constrained.trunc.nxv16f32(<vscale x 16 x float>, metadata)
 
-define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
+define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -244,7 +244,7 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
 }
 declare <vscale x 1 x double> @llvm.experimental.constrained.trunc.nxv1f64(<vscale x 1 x double>, metadata)
 
-define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
+define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -264,7 +264,7 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
 }
 declare <vscale x 2 x double> @llvm.experimental.constrained.trunc.nxv2f64(<vscale x 2 x double>, metadata)
 
-define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
+define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -284,7 +284,7 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
 }
 declare <vscale x 4 x double> @llvm.experimental.constrained.trunc.nxv4f64(<vscale x 4 x double>, metadata)
 
-define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) {
+define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) strictfp {
 ; CHECK-LABEL: trunc_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
index 6e5794bfffa0db..04ed41cd0952d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fadd.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
-define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <vscale x 1 x half> %vc
 }
 
-define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fadd.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
-define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x half> %vc
 }
 
-define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fadd.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
-define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <vscale x 4 x half> %vc
 }
 
-define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fadd.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
-define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -88,7 +88,7 @@ entry:
   ret <vscale x 8 x half> %vc
 }
 
-define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -101,7 +101,7 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fadd.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
-define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -112,7 +112,7 @@ entry:
   ret <vscale x 16 x half> %vc
 }
 
-define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -125,7 +125,7 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fadd.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
-define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -136,7 +136,7 @@ entry:
   ret <vscale x 32 x half> %vc
 }
 
-define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -149,7 +149,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fadd.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
-define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -160,7 +160,7 @@ entry:
   ret <vscale x 1 x float> %vc
 }
 
-define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -173,7 +173,7 @@ define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fadd.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
-define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -184,7 +184,7 @@ entry:
   ret <vscale x 2 x float> %vc
 }
 
-define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -197,7 +197,7 @@ define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fadd.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
-define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -208,7 +208,7 @@ entry:
   ret <vscale x 4 x float> %vc
 }
 
-define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -221,7 +221,7 @@ define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fadd.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
-define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -232,7 +232,7 @@ entry:
   ret <vscale x 8 x float> %vc
 }
 
-define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -245,7 +245,7 @@ define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fadd.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
-define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -256,7 +256,7 @@ entry:
   ret <vscale x 16 x float> %vc
 }
 
-define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -269,7 +269,7 @@ define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fadd.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
-define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 1 x double> %vc
 }
 
-define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -293,7 +293,7 @@ define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fadd.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
-define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -304,7 +304,7 @@ entry:
   ret <vscale x 2 x double> %vc
 }
 
-define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fadd.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
-define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -328,7 +328,7 @@ entry:
   ret <vscale x 4 x double> %vc
 }
 
-define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -341,7 +341,7 @@ define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fadd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
-define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfadd_vv_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -352,7 +352,7 @@ entry:
   ret <vscale x 8 x double> %vc
 }
 
-define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfadd_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
index b1bc5855369e41..bd220d24113cf8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fdiv.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
-define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <vscale x 1 x half> %vc
 }
 
-define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fdiv.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
-define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x half> %vc
 }
 
-define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fdiv.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
-define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <vscale x 4 x half> %vc
 }
 
-define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fdiv.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
-define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -88,7 +88,7 @@ entry:
   ret <vscale x 8 x half> %vc
 }
 
-define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -100,7 +100,7 @@ define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
   ret <vscale x 8 x half> %vc
 }
 
-define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_fv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <vscale x 8 x half> @vfdiv_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fdiv.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
-define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -124,7 +124,7 @@ entry:
   ret <vscale x 16 x half> %vc
 }
 
-define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -137,7 +137,7 @@ define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fdiv.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
-define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -148,7 +148,7 @@ entry:
   ret <vscale x 32 x half> %vc
 }
 
-define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -161,7 +161,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fdiv.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
-define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -172,7 +172,7 @@ entry:
   ret <vscale x 1 x float> %vc
 }
 
-define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -185,7 +185,7 @@ define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fdiv.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
-define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -196,7 +196,7 @@ entry:
   ret <vscale x 2 x float> %vc
 }
 
-define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -209,7 +209,7 @@ define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fdiv.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
-define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -220,7 +220,7 @@ entry:
   ret <vscale x 4 x float> %vc
 }
 
-define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -233,7 +233,7 @@ define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fdiv.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
-define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -244,7 +244,7 @@ entry:
   ret <vscale x 8 x float> %vc
 }
 
-define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -256,7 +256,7 @@ define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x float> %vc
 }
 
-define <vscale x 8 x float> @vfdiv_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+define <vscale x 8 x float> @vfdiv_fv_nxv8f32(<vscale x 8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_fv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -269,7 +269,7 @@ define <vscale x 8 x float> @vfdiv_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fdiv.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
-define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 16 x float> %vc
 }
 
-define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -293,7 +293,7 @@ define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fdiv.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
-define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -304,7 +304,7 @@ entry:
   ret <vscale x 1 x double> %vc
 }
 
-define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fdiv.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
-define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -328,7 +328,7 @@ entry:
   ret <vscale x 2 x double> %vc
 }
 
-define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -341,7 +341,7 @@ define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fdiv.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
-define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -352,7 +352,7 @@ entry:
   ret <vscale x 4 x double> %vc
 }
 
-define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -365,7 +365,7 @@ define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fdiv.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
-define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfdiv_vv_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -376,7 +376,7 @@ entry:
   ret <vscale x 8 x double> %vc
 }
 
-define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -388,7 +388,7 @@ define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x double> %vc
 }
 
-define <vscale x 8 x double> @vfdiv_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+define <vscale x 8 x double> @vfdiv_fv_nxv8f64(<vscale x 8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfdiv_fv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index 6d978a185cfa0c..ad5506a4d7fe48 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
 
-define <vscale x 1 x half> @vfmadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) {
+define <vscale x 1 x half> @vfmadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -19,7 +19,7 @@ define <vscale x 1 x half> @vfmadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
   ret <vscale x 1 x half> %vd
 }
 
-define <vscale x 1 x half> @vfmadd_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) {
+define <vscale x 1 x half> @vfmadd_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @vfmadd_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
 
-define <vscale x 2 x half> @vfmadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) {
+define <vscale x 2 x half> @vfmadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -43,7 +43,7 @@ define <vscale x 2 x half> @vfmadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
   ret <vscale x 2 x half> %vd
 }
 
-define <vscale x 2 x half> @vfmadd_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) {
+define <vscale x 2 x half> @vfmadd_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -57,7 +57,7 @@ define <vscale x 2 x half> @vfmadd_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
 
-define <vscale x 4 x half> @vfmadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) {
+define <vscale x 4 x half> @vfmadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -67,7 +67,7 @@ define <vscale x 4 x half> @vfmadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
   ret <vscale x 4 x half> %vd
 }
 
-define <vscale x 4 x half> @vfmadd_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) {
+define <vscale x 4 x half> @vfmadd_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -81,7 +81,7 @@ define <vscale x 4 x half> @vfmadd_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
 
-define <vscale x 8 x half> @vfmadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) {
+define <vscale x 8 x half> @vfmadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -91,7 +91,7 @@ define <vscale x 8 x half> @vfmadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
   ret <vscale x 8 x half> %vd
 }
 
-define <vscale x 8 x half> @vfmadd_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) {
+define <vscale x 8 x half> @vfmadd_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -105,7 +105,7 @@ define <vscale x 8 x half> @vfmadd_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
 
-define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) {
+define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -115,7 +115,7 @@ define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
   ret <vscale x 16 x half> %vd
 }
 
-define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) {
+define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -129,7 +129,7 @@ define <vscale x 16 x half> @vfmadd_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
 
-define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
+define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -140,7 +140,7 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
   ret <vscale x 32 x half> %vd
 }
 
-define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) {
+define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -154,7 +154,7 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
 
-define <vscale x 1 x float> @vfmadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) {
+define <vscale x 1 x float> @vfmadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 1 x float> @vfmadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale
   ret <vscale x 1 x float> %vd
 }
 
-define <vscale x 1 x float> @vfmadd_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) {
+define <vscale x 1 x float> @vfmadd_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -178,7 +178,7 @@ define <vscale x 1 x float> @vfmadd_vf_nxv1f32(<vscale x 1 x float> %va, <vscale
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
 
-define <vscale x 2 x float> @vfmadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) {
+define <vscale x 2 x float> @vfmadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -188,7 +188,7 @@ define <vscale x 2 x float> @vfmadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale
   ret <vscale x 2 x float> %vd
 }
 
-define <vscale x 2 x float> @vfmadd_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) {
+define <vscale x 2 x float> @vfmadd_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -202,7 +202,7 @@ define <vscale x 2 x float> @vfmadd_vf_nxv2f32(<vscale x 2 x float> %va, <vscale
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
 
-define <vscale x 4 x float> @vfmadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) {
+define <vscale x 4 x float> @vfmadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -212,7 +212,7 @@ define <vscale x 4 x float> @vfmadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale
   ret <vscale x 4 x float> %vd
 }
 
-define <vscale x 4 x float> @vfmadd_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) {
+define <vscale x 4 x float> @vfmadd_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -226,7 +226,7 @@ define <vscale x 4 x float> @vfmadd_vf_nxv4f32(<vscale x 4 x float> %va, <vscale
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
 
-define <vscale x 8 x float> @vfmadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) {
+define <vscale x 8 x float> @vfmadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -236,7 +236,7 @@ define <vscale x 8 x float> @vfmadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
   ret <vscale x 8 x float> %vd
 }
 
-define <vscale x 8 x float> @vfmadd_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) {
+define <vscale x 8 x float> @vfmadd_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -250,7 +250,7 @@ define <vscale x 8 x float> @vfmadd_vf_nxv8f32(<vscale x 8 x float> %va, <vscale
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
 
-define <vscale x 16 x float> @vfmadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
+define <vscale x 16 x float> @vfmadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -261,7 +261,7 @@ define <vscale x 16 x float> @vfmadd_vv_nxv16f32(<vscale x 16 x float> %va, <vsc
   ret <vscale x 16 x float> %vd
 }
 
-define <vscale x 16 x float> @vfmadd_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) {
+define <vscale x 16 x float> @vfmadd_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -275,7 +275,7 @@ define <vscale x 16 x float> @vfmadd_vf_nxv16f32(<vscale x 16 x float> %va, <vsc
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
 
-define <vscale x 1 x double> @vfmadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) {
+define <vscale x 1 x double> @vfmadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -285,7 +285,7 @@ define <vscale x 1 x double> @vfmadd_vv_nxv1f64(<vscale x 1 x double> %va, <vsca
   ret <vscale x 1 x double> %vd
 }
 
-define <vscale x 1 x double> @vfmadd_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) {
+define <vscale x 1 x double> @vfmadd_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -299,7 +299,7 @@ define <vscale x 1 x double> @vfmadd_vf_nxv1f64(<vscale x 1 x double> %va, <vsca
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
 
-define <vscale x 2 x double> @vfmadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) {
+define <vscale x 2 x double> @vfmadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -309,7 +309,7 @@ define <vscale x 2 x double> @vfmadd_vv_nxv2f64(<vscale x 2 x double> %va, <vsca
   ret <vscale x 2 x double> %vd
 }
 
-define <vscale x 2 x double> @vfmadd_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) {
+define <vscale x 2 x double> @vfmadd_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -323,7 +323,7 @@ define <vscale x 2 x double> @vfmadd_vf_nxv2f64(<vscale x 2 x double> %va, <vsca
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
 
-define <vscale x 4 x double> @vfmadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) {
+define <vscale x 4 x double> @vfmadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -333,7 +333,7 @@ define <vscale x 4 x double> @vfmadd_vv_nxv4f64(<vscale x 4 x double> %va, <vsca
   ret <vscale x 4 x double> %vd
 }
 
-define <vscale x 4 x double> @vfmadd_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) {
+define <vscale x 4 x double> @vfmadd_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -347,7 +347,7 @@ define <vscale x 4 x double> @vfmadd_vf_nxv4f64(<vscale x 4 x double> %va, <vsca
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
 
-define <vscale x 8 x double> @vfmadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
+define <vscale x 8 x double> @vfmadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmadd_vv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -358,7 +358,7 @@ define <vscale x 8 x double> @vfmadd_vv_nxv8f64(<vscale x 8 x double> %va, <vsca
   ret <vscale x 8 x double> %vd
 }
 
-define <vscale x 8 x double> @vfmadd_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) {
+define <vscale x 8 x double> @vfmadd_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmadd_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
index d0684bd6a9e81d..447303c6f79ae1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
 
-define <vscale x 1 x half> @vfmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) {
+define <vscale x 1 x half> @vfmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x half> @vfmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
   ret <vscale x 1 x half> %vd
 }
 
-define <vscale x 1 x half> @vfmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) {
+define <vscale x 1 x half> @vfmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @vfmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
 
-define <vscale x 2 x half> @vfmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) {
+define <vscale x 2 x half> @vfmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -46,7 +46,7 @@ define <vscale x 2 x half> @vfmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
   ret <vscale x 2 x half> %vd
 }
 
-define <vscale x 2 x half> @vfmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) {
+define <vscale x 2 x half> @vfmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -61,7 +61,7 @@ define <vscale x 2 x half> @vfmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
 
-define <vscale x 4 x half> @vfmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) {
+define <vscale x 4 x half> @vfmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -72,7 +72,7 @@ define <vscale x 4 x half> @vfmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
   ret <vscale x 4 x half> %vd
 }
 
-define <vscale x 4 x half> @vfmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) {
+define <vscale x 4 x half> @vfmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -87,7 +87,7 @@ define <vscale x 4 x half> @vfmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
 
-define <vscale x 8 x half> @vfmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) {
+define <vscale x 8 x half> @vfmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -98,7 +98,7 @@ define <vscale x 8 x half> @vfmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
   ret <vscale x 8 x half> %vd
 }
 
-define <vscale x 8 x half> @vfmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) {
+define <vscale x 8 x half> @vfmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <vscale x 8 x half> @vfmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
 
-define <vscale x 16 x half> @vfmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) {
+define <vscale x 16 x half> @vfmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -124,7 +124,7 @@ define <vscale x 16 x half> @vfmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
   ret <vscale x 16 x half> %vd
 }
 
-define <vscale x 16 x half> @vfmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) {
+define <vscale x 16 x half> @vfmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -139,7 +139,7 @@ define <vscale x 16 x half> @vfmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscal
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
 
-define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
+define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -151,7 +151,7 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
   ret <vscale x 32 x half> %vd
 }
 
-define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) {
+define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -166,7 +166,7 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
 
-define <vscale x 1 x float> @vfmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) {
+define <vscale x 1 x float> @vfmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -177,7 +177,7 @@ define <vscale x 1 x float> @vfmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale
   ret <vscale x 1 x float> %vd
 }
 
-define <vscale x 1 x float> @vfmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) {
+define <vscale x 1 x float> @vfmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -192,7 +192,7 @@ define <vscale x 1 x float> @vfmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
 
-define <vscale x 2 x float> @vfmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) {
+define <vscale x 2 x float> @vfmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -203,7 +203,7 @@ define <vscale x 2 x float> @vfmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale
   ret <vscale x 2 x float> %vd
 }
 
-define <vscale x 2 x float> @vfmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) {
+define <vscale x 2 x float> @vfmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -218,7 +218,7 @@ define <vscale x 2 x float> @vfmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
 
-define <vscale x 4 x float> @vfmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) {
+define <vscale x 4 x float> @vfmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -229,7 +229,7 @@ define <vscale x 4 x float> @vfmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale
   ret <vscale x 4 x float> %vd
 }
 
-define <vscale x 4 x float> @vfmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) {
+define <vscale x 4 x float> @vfmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -244,7 +244,7 @@ define <vscale x 4 x float> @vfmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
 
-define <vscale x 8 x float> @vfmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) {
+define <vscale x 8 x float> @vfmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -255,7 +255,7 @@ define <vscale x 8 x float> @vfmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale
   ret <vscale x 8 x float> %vd
 }
 
-define <vscale x 8 x float> @vfmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) {
+define <vscale x 8 x float> @vfmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -270,7 +270,7 @@ define <vscale x 8 x float> @vfmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
 
-define <vscale x 16 x float> @vfmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
+define <vscale x 16 x float> @vfmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -282,7 +282,7 @@ define <vscale x 16 x float> @vfmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vsc
   ret <vscale x 16 x float> %vd
 }
 
-define <vscale x 16 x float> @vfmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) {
+define <vscale x 16 x float> @vfmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -297,7 +297,7 @@ define <vscale x 16 x float> @vfmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vsc
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
 
-define <vscale x 1 x double> @vfmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) {
+define <vscale x 1 x double> @vfmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -308,7 +308,7 @@ define <vscale x 1 x double> @vfmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vsca
   ret <vscale x 1 x double> %vd
 }
 
-define <vscale x 1 x double> @vfmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) {
+define <vscale x 1 x double> @vfmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -323,7 +323,7 @@ define <vscale x 1 x double> @vfmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vsca
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
 
-define <vscale x 2 x double> @vfmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) {
+define <vscale x 2 x double> @vfmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -334,7 +334,7 @@ define <vscale x 2 x double> @vfmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vsca
   ret <vscale x 2 x double> %vd
 }
 
-define <vscale x 2 x double> @vfmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) {
+define <vscale x 2 x double> @vfmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -349,7 +349,7 @@ define <vscale x 2 x double> @vfmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vsca
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
 
-define <vscale x 4 x double> @vfmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) {
+define <vscale x 4 x double> @vfmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -360,7 +360,7 @@ define <vscale x 4 x double> @vfmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vsca
   ret <vscale x 4 x double> %vd
 }
 
-define <vscale x 4 x double> @vfmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) {
+define <vscale x 4 x double> @vfmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -375,7 +375,7 @@ define <vscale x 4 x double> @vfmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vsca
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
 
-define <vscale x 8 x double> @vfmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
+define <vscale x 8 x double> @vfmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfmsub_vv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -387,7 +387,7 @@ define <vscale x 8 x double> @vfmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vsca
   ret <vscale x 8 x double> %vd
 }
 
-define <vscale x 8 x double> @vfmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) {
+define <vscale x 8 x double> @vfmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfmsub_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
index aa6750387c0627..dbc6125b638f45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fmul.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
-define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <vscale x 1 x half> %vc
 }
 
-define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fmul.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
-define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x half> %vc
 }
 
-define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fmul.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
-define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <vscale x 4 x half> %vc
 }
 
-define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fmul.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
-define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -88,7 +88,7 @@ entry:
   ret <vscale x 8 x half> %vc
 }
 
-define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -101,7 +101,7 @@ define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fmul.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
-define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -112,7 +112,7 @@ entry:
   ret <vscale x 16 x half> %vc
 }
 
-define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -125,7 +125,7 @@ define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fmul.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
-define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -136,7 +136,7 @@ entry:
   ret <vscale x 32 x half> %vc
 }
 
-define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -149,7 +149,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fmul.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
-define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -160,7 +160,7 @@ entry:
   ret <vscale x 1 x float> %vc
 }
 
-define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -173,7 +173,7 @@ define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fmul.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
-define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -184,7 +184,7 @@ entry:
   ret <vscale x 2 x float> %vc
 }
 
-define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -197,7 +197,7 @@ define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fmul.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
-define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -208,7 +208,7 @@ entry:
   ret <vscale x 4 x float> %vc
 }
 
-define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -221,7 +221,7 @@ define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fmul.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
-define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -232,7 +232,7 @@ entry:
   ret <vscale x 8 x float> %vc
 }
 
-define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -245,7 +245,7 @@ define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fmul.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
-define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -256,7 +256,7 @@ entry:
   ret <vscale x 16 x float> %vc
 }
 
-define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -269,7 +269,7 @@ define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fmul.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
-define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 1 x double> %vc
 }
 
-define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -293,7 +293,7 @@ define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fmul.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
-define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -304,7 +304,7 @@ entry:
   ret <vscale x 2 x double> %vc
 }
 
-define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fmul.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
-define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -328,7 +328,7 @@ entry:
   ret <vscale x 4 x double> %vc
 }
 
-define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -341,7 +341,7 @@ define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fmul.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
-define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfmul_vv_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -352,7 +352,7 @@ entry:
   ret <vscale x 8 x double> %vc
 }
 
-define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfmul_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index 21aac868c4bb0d..49b8263473ffbf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
 
-define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) {
+define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -21,7 +21,7 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
   ret <vscale x 1 x half> %vd
 }
 
-define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) {
+define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -37,7 +37,7 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
 
-define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) {
+define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -49,7 +49,7 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
   ret <vscale x 2 x half> %vd
 }
 
-define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) {
+define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -65,7 +65,7 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
 
-define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) {
+define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
   ret <vscale x 4 x half> %vd
 }
 
-define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) {
+define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -93,7 +93,7 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
 
-define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) {
+define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -105,7 +105,7 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
   ret <vscale x 8 x half> %vd
 }
 
-define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) {
+define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -121,7 +121,7 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
 
-define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) {
+define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -133,7 +133,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
   ret <vscale x 16 x half> %vd
 }
 
-define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) {
+define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -149,7 +149,7 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vsca
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
 
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -162,7 +162,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
   ret <vscale x 32 x half> %vd
 }
 
-define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) {
+define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -178,7 +178,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
 
-define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) {
+define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -190,7 +190,7 @@ define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscal
   ret <vscale x 1 x float> %vd
 }
 
-define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) {
+define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -206,7 +206,7 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscal
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
 
-define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) {
+define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -218,7 +218,7 @@ define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscal
   ret <vscale x 2 x float> %vd
 }
 
-define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) {
+define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -234,7 +234,7 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscal
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
 
-define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) {
+define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -246,7 +246,7 @@ define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
   ret <vscale x 4 x float> %vd
 }
 
-define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) {
+define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -262,7 +262,7 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscal
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
 
-define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) {
+define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -274,7 +274,7 @@ define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
   ret <vscale x 8 x float> %vd
 }
 
-define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) {
+define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -290,7 +290,7 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscal
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
 
-define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
+define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -303,7 +303,7 @@ define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vs
   ret <vscale x 16 x float> %vd
 }
 
-define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) {
+define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -319,7 +319,7 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vs
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
 
-define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) {
+define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -331,7 +331,7 @@ define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vsc
   ret <vscale x 1 x double> %vd
 }
 
-define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) {
+define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -347,7 +347,7 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vsc
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
 
-define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) {
+define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -359,7 +359,7 @@ define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vsc
   ret <vscale x 2 x double> %vd
 }
 
-define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) {
+define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -375,7 +375,7 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vsc
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
 
-define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) {
+define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -387,7 +387,7 @@ define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vsc
   ret <vscale x 4 x double> %vd
 }
 
-define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) {
+define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -403,7 +403,7 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vsc
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
 
-define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
+define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -416,7 +416,7 @@ define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vsc
   ret <vscale x 8 x double> %vd
 }
 
-define <vscale x 8 x double> @vfnmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) {
+define <vscale x 8 x double> @vfnmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 89ec2070f4d7c7..c7a1b769f13c63 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -9,7 +9,7 @@
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
 
-define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) {
+define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -20,7 +20,7 @@ define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale
   ret <vscale x 1 x half> %vd
 }
 
-define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) {
+define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -35,7 +35,7 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
 
-define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) {
+define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -46,7 +46,7 @@ define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale
   ret <vscale x 2 x half> %vd
 }
 
-define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) {
+define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -61,7 +61,7 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
 
-define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) {
+define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -72,7 +72,7 @@ define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale
   ret <vscale x 4 x half> %vd
 }
 
-define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) {
+define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -87,7 +87,7 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
 
-define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) {
+define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -98,7 +98,7 @@ define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale
   ret <vscale x 8 x half> %vd
 }
 
-define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) {
+define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
 
-define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) {
+define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -124,7 +124,7 @@ define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vsca
   ret <vscale x 16 x half> %vd
 }
 
-define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) {
+define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -139,7 +139,7 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vsca
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
 
-define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) {
+define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -151,7 +151,7 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
   ret <vscale x 32 x half> %vd
 }
 
-define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) {
+define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -166,7 +166,7 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
 
-define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) {
+define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -177,7 +177,7 @@ define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscal
   ret <vscale x 1 x float> %vd
 }
 
-define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) {
+define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -192,7 +192,7 @@ define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscal
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
 
-define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) {
+define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -203,7 +203,7 @@ define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscal
   ret <vscale x 2 x float> %vd
 }
 
-define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) {
+define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -218,7 +218,7 @@ define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscal
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
 
-define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) {
+define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -229,7 +229,7 @@ define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
   ret <vscale x 4 x float> %vd
 }
 
-define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) {
+define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -244,7 +244,7 @@ define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscal
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
 
-define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) {
+define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -255,7 +255,7 @@ define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
   ret <vscale x 8 x float> %vd
 }
 
-define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) {
+define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -270,7 +270,7 @@ define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscal
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
 
-define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) {
+define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -282,7 +282,7 @@ define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vs
   ret <vscale x 16 x float> %vd
 }
 
-define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) {
+define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -297,7 +297,7 @@ define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vs
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
 
-define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) {
+define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -308,7 +308,7 @@ define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vsc
   ret <vscale x 1 x double> %vd
 }
 
-define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) {
+define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -323,7 +323,7 @@ define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vsc
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
 
-define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) {
+define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -334,7 +334,7 @@ define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vsc
   ret <vscale x 2 x double> %vd
 }
 
-define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) {
+define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -349,7 +349,7 @@ define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vsc
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
 
-define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) {
+define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -360,7 +360,7 @@ define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vsc
   ret <vscale x 4 x double> %vd
 }
 
-define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) {
+define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -375,7 +375,7 @@ define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vsc
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
 
-define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) {
+define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) strictfp {
 ; CHECK-LABEL: vfnmsub_vv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -387,7 +387,7 @@ define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vsc
   ret <vscale x 8 x double> %vd
 }
 
-define <vscale x 8 x double> @vfnmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) {
+define <vscale x 8 x double> @vfnmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) strictfp {
 ; CHECK-LABEL: vfnmsub_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll
index 0772298385d6c3..5de309757c6dcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fpext.nxv1f32.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) {
+define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv1f16_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -17,7 +17,7 @@ define <vscale x 1 x float> @vfpext_nxv1f16_nxv1f32(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x double> @vfpext_nxv1f16_nxv1f64(<vscale x 1 x half> %va) {
+define <vscale x 1 x double> @vfpext_nxv1f16_nxv1f64(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv1f16_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -30,7 +30,7 @@ define <vscale x 1 x double> @vfpext_nxv1f16_nxv1f64(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fpext.nxv2f32.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32(<vscale x 2 x half> %va) {
+define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv2f16_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -42,7 +42,7 @@ define <vscale x 2 x float> @vfpext_nxv2f16_nxv2f32(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64(<vscale x 2 x half> %va) {
+define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv2f16_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -55,7 +55,7 @@ define <vscale x 2 x double> @vfpext_nxv2f16_nxv2f64(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fpext.nxv4f32.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x float> @vfpext_nxv4f16_nxv4f32(<vscale x 4 x half> %va) {
+define <vscale x 4 x float> @vfpext_nxv4f16_nxv4f32(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv4f16_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -67,7 +67,7 @@ define <vscale x 4 x float> @vfpext_nxv4f16_nxv4f32(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x double> @vfpext_nxv4f16_nxv4f64(<vscale x 4 x half> %va) {
+define <vscale x 4 x double> @vfpext_nxv4f16_nxv4f64(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv4f16_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -80,7 +80,7 @@ define <vscale x 4 x double> @vfpext_nxv4f16_nxv4f64(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fpext.nxv8f32.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x float> @vfpext_nxv8f16_nxv8f32(<vscale x 8 x half> %va) {
+define <vscale x 8 x float> @vfpext_nxv8f16_nxv8f32(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv8f16_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -92,7 +92,7 @@ define <vscale x 8 x float> @vfpext_nxv8f16_nxv8f32(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x double> @vfpext_nxv8f16_nxv8f64(<vscale x 8 x half> %va) {
+define <vscale x 8 x double> @vfpext_nxv8f16_nxv8f64(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv8f16_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -105,7 +105,7 @@ define <vscale x 8 x double> @vfpext_nxv8f16_nxv8f64(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fpext.nxv1f64.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x double> @vfpext_nxv1f32_nxv1f64(<vscale x 1 x float> %va) {
+define <vscale x 1 x double> @vfpext_nxv1f32_nxv1f64(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv1f32_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -117,7 +117,7 @@ define <vscale x 1 x double> @vfpext_nxv1f32_nxv1f64(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fpext.nxv2f64.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64(<vscale x 2 x float> %va) {
+define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv2f32_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -129,7 +129,7 @@ define <vscale x 2 x double> @vfpext_nxv2f32_nxv2f64(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fpext.nxv4f64.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x double> @vfpext_nxv4f32_nxv4f64(<vscale x 4 x float> %va) {
+define <vscale x 4 x double> @vfpext_nxv4f32_nxv4f64(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv4f32_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -141,7 +141,7 @@ define <vscale x 4 x double> @vfpext_nxv4f32_nxv4f64(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fpext.nxv8f64.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x double> @vfpext_nxv8f32_nxv8f64(<vscale x 8 x float> %va) {
+define <vscale x 8 x double> @vfpext_nxv8f32_nxv8f64(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfpext_nxv8f32_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
index 9b00bdd296007f..47f68837cd578c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoi-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
+define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -18,7 +18,7 @@ define <vscale x 1 x i1> @vfptosi_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i1> @vfptoui_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
+define <vscale x 1 x i1> @vfptoui_nxv1f16_nxv1i1(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -31,7 +31,7 @@ define <vscale x 1 x i1> @vfptoui_nxv1f16_nxv1i1(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i7> @llvm.experimental.constrained.fptosi.nxv1i7.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i7> @vfptosi_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
+define <vscale x 1 x i7> @vfptosi_nxv1f16_nxv1i7(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -43,7 +43,7 @@ define <vscale x 1 x i7> @vfptosi_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i7> @llvm.experimental.constrained.fptoui.nxv1i7.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i7> @vfptoui_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
+define <vscale x 1 x i7> @vfptoui_nxv1f16_nxv1i7(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -55,7 +55,7 @@ define <vscale x 1 x i7> @vfptoui_nxv1f16_nxv1i7(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i8> @vfptosi_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
+define <vscale x 1 x i8> @vfptosi_nxv1f16_nxv1i8(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -67,7 +67,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i8> @vfptoui_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
+define <vscale x 1 x i8> @vfptoui_nxv1f16_nxv1i8(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -79,7 +79,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f16_nxv1i8(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i16> @vfptosi_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
+define <vscale x 1 x i16> @vfptosi_nxv1f16_nxv1i16(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -90,7 +90,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i16> @vfptoui_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
+define <vscale x 1 x i16> @vfptoui_nxv1f16_nxv1i16(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -101,7 +101,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f16_nxv1i16(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i32> @vfptosi_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
+define <vscale x 1 x i32> @vfptosi_nxv1f16_nxv1i32(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -113,7 +113,7 @@ define <vscale x 1 x i32> @vfptosi_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i32> @vfptoui_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
+define <vscale x 1 x i32> @vfptoui_nxv1f16_nxv1i32(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -125,7 +125,7 @@ define <vscale x 1 x i32> @vfptoui_nxv1f16_nxv1i32(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i64> @vfptosi_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
+define <vscale x 1 x i64> @vfptosi_nxv1f16_nxv1i64(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -138,7 +138,7 @@ define <vscale x 1 x i64> @vfptosi_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f16(<vscale x 1 x half>, metadata)
-define <vscale x 1 x i64> @vfptoui_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
+define <vscale x 1 x i64> @vfptoui_nxv1f16_nxv1i64(<vscale x 1 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -151,7 +151,7 @@ define <vscale x 1 x i64> @vfptoui_nxv1f16_nxv1i64(<vscale x 1 x half> %va) {
 }
 
 declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i1> @vfptosi_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
+define <vscale x 2 x i1> @vfptosi_nxv2f16_nxv2i1(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -164,7 +164,7 @@ define <vscale x 2 x i1> @vfptosi_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i1> @vfptoui_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
+define <vscale x 2 x i1> @vfptoui_nxv2f16_nxv2i1(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -177,7 +177,7 @@ define <vscale x 2 x i1> @vfptoui_nxv2f16_nxv2i1(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i8> @vfptosi_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
+define <vscale x 2 x i8> @vfptosi_nxv2f16_nxv2i8(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -189,7 +189,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i8> @vfptoui_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
+define <vscale x 2 x i8> @vfptoui_nxv2f16_nxv2i8(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -201,7 +201,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f16_nxv2i8(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i16> @vfptosi_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
+define <vscale x 2 x i16> @vfptosi_nxv2f16_nxv2i16(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -212,7 +212,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i16> @vfptoui_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
+define <vscale x 2 x i16> @vfptoui_nxv2f16_nxv2i16(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -223,7 +223,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f16_nxv2i16(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i32> @vfptosi_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
+define <vscale x 2 x i32> @vfptosi_nxv2f16_nxv2i32(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -235,7 +235,7 @@ define <vscale x 2 x i32> @vfptosi_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i32> @vfptoui_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
+define <vscale x 2 x i32> @vfptoui_nxv2f16_nxv2i32(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -247,7 +247,7 @@ define <vscale x 2 x i32> @vfptoui_nxv2f16_nxv2i32(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i64> @vfptosi_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
+define <vscale x 2 x i64> @vfptosi_nxv2f16_nxv2i64(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -260,7 +260,7 @@ define <vscale x 2 x i64> @vfptosi_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half>, metadata)
-define <vscale x 2 x i64> @vfptoui_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
+define <vscale x 2 x i64> @vfptoui_nxv2f16_nxv2i64(<vscale x 2 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -273,7 +273,7 @@ define <vscale x 2 x i64> @vfptoui_nxv2f16_nxv2i64(<vscale x 2 x half> %va) {
 }
 
 declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i1> @vfptosi_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
+define <vscale x 4 x i1> @vfptosi_nxv4f16_nxv4i1(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -286,7 +286,7 @@ define <vscale x 4 x i1> @vfptosi_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i1> @vfptoui_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
+define <vscale x 4 x i1> @vfptoui_nxv4f16_nxv4i1(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -299,7 +299,7 @@ define <vscale x 4 x i1> @vfptoui_nxv4f16_nxv4i1(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i8> @vfptosi_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
+define <vscale x 4 x i8> @vfptosi_nxv4f16_nxv4i8(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -311,7 +311,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i8> @vfptoui_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
+define <vscale x 4 x i8> @vfptoui_nxv4f16_nxv4i8(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -323,7 +323,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f16_nxv4i8(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i16> @vfptosi_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
+define <vscale x 4 x i16> @vfptosi_nxv4f16_nxv4i16(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -334,7 +334,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i16> @vfptoui_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
+define <vscale x 4 x i16> @vfptoui_nxv4f16_nxv4i16(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -345,7 +345,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f16_nxv4i16(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i32> @vfptosi_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
+define <vscale x 4 x i32> @vfptosi_nxv4f16_nxv4i32(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -357,7 +357,7 @@ define <vscale x 4 x i32> @vfptosi_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i32> @vfptoui_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
+define <vscale x 4 x i32> @vfptoui_nxv4f16_nxv4i32(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -369,7 +369,7 @@ define <vscale x 4 x i32> @vfptoui_nxv4f16_nxv4i32(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i64> @vfptosi_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
+define <vscale x 4 x i64> @vfptosi_nxv4f16_nxv4i64(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -382,7 +382,7 @@ define <vscale x 4 x i64> @vfptosi_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f16(<vscale x 4 x half>, metadata)
-define <vscale x 4 x i64> @vfptoui_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
+define <vscale x 4 x i64> @vfptoui_nxv4f16_nxv4i64(<vscale x 4 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -395,7 +395,7 @@ define <vscale x 4 x i64> @vfptoui_nxv4f16_nxv4i64(<vscale x 4 x half> %va) {
 }
 
 declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i1> @vfptosi_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
+define <vscale x 8 x i1> @vfptosi_nxv8f16_nxv8i1(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -408,7 +408,7 @@ define <vscale x 8 x i1> @vfptosi_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i1> @vfptoui_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
+define <vscale x 8 x i1> @vfptoui_nxv8f16_nxv8i1(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -421,7 +421,7 @@ define <vscale x 8 x i1> @vfptoui_nxv8f16_nxv8i1(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i8> @vfptosi_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
+define <vscale x 8 x i8> @vfptosi_nxv8f16_nxv8i8(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -433,7 +433,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i8> @vfptoui_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
+define <vscale x 8 x i8> @vfptoui_nxv8f16_nxv8i8(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -445,7 +445,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f16_nxv8i8(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i16> @vfptosi_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
+define <vscale x 8 x i16> @vfptosi_nxv8f16_nxv8i16(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -456,7 +456,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i16> @vfptoui_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
+define <vscale x 8 x i16> @vfptoui_nxv8f16_nxv8i16(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -467,7 +467,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f16_nxv8i16(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i32> @vfptosi_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
+define <vscale x 8 x i32> @vfptosi_nxv8f16_nxv8i32(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -479,7 +479,7 @@ define <vscale x 8 x i32> @vfptosi_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i32> @vfptoui_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
+define <vscale x 8 x i32> @vfptoui_nxv8f16_nxv8i32(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -491,7 +491,7 @@ define <vscale x 8 x i32> @vfptoui_nxv8f16_nxv8i32(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i64> @vfptosi_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
+define <vscale x 8 x i64> @vfptosi_nxv8f16_nxv8i64(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -504,7 +504,7 @@ define <vscale x 8 x i64> @vfptosi_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f16(<vscale x 8 x half>, metadata)
-define <vscale x 8 x i64> @vfptoui_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
+define <vscale x 8 x i64> @vfptoui_nxv8f16_nxv8i64(<vscale x 8 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -517,7 +517,7 @@ define <vscale x 8 x i64> @vfptoui_nxv8f16_nxv8i64(<vscale x 8 x half> %va) {
 }
 
 declare <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i1> @vfptosi_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
+define <vscale x 16 x i1> @vfptosi_nxv16f16_nxv16i1(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -530,7 +530,7 @@ define <vscale x 16 x i1> @vfptosi_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
 }
 
 declare <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i1> @vfptoui_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
+define <vscale x 16 x i1> @vfptoui_nxv16f16_nxv16i1(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -543,7 +543,7 @@ define <vscale x 16 x i1> @vfptoui_nxv16f16_nxv16i1(<vscale x 16 x half> %va) {
 }
 
 declare <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i8> @vfptosi_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
+define <vscale x 16 x i8> @vfptosi_nxv16f16_nxv16i8(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -555,7 +555,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
 }
 
 declare <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i8> @vfptoui_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
+define <vscale x 16 x i8> @vfptoui_nxv16f16_nxv16i8(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -567,7 +567,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f16_nxv16i8(<vscale x 16 x half> %va) {
 }
 
 declare <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i16> @vfptosi_nxv16f16_nxv16i16(<vscale x 16 x half> %va) {
+define <vscale x 16 x i16> @vfptosi_nxv16f16_nxv16i16(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -578,7 +578,7 @@ define <vscale x 16 x i16> @vfptosi_nxv16f16_nxv16i16(<vscale x 16 x half> %va)
 }
 
 declare <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i16> @vfptoui_nxv16f16_nxv16i16(<vscale x 16 x half> %va) {
+define <vscale x 16 x i16> @vfptoui_nxv16f16_nxv16i16(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -589,7 +589,7 @@ define <vscale x 16 x i16> @vfptoui_nxv16f16_nxv16i16(<vscale x 16 x half> %va)
 }
 
 declare <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i32> @vfptosi_nxv16f16_nxv16i32(<vscale x 16 x half> %va) {
+define <vscale x 16 x i32> @vfptosi_nxv16f16_nxv16i32(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -601,7 +601,7 @@ define <vscale x 16 x i32> @vfptosi_nxv16f16_nxv16i32(<vscale x 16 x half> %va)
 }
 
 declare <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f16(<vscale x 16 x half>, metadata)
-define <vscale x 16 x i32> @vfptoui_nxv16f16_nxv16i32(<vscale x 16 x half> %va) {
+define <vscale x 16 x i32> @vfptoui_nxv16f16_nxv16i32(<vscale x 16 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -613,7 +613,7 @@ define <vscale x 16 x i32> @vfptoui_nxv16f16_nxv16i32(<vscale x 16 x half> %va)
 }
 
 declare <vscale x 32 x i1> @llvm.experimental.constrained.fptosi.nxv32i1.nxv32f16(<vscale x 32 x half>, metadata)
-define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
+define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -626,7 +626,7 @@ define <vscale x 32 x i1> @vfptosi_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
 }
 
 declare <vscale x 32 x i1> @llvm.experimental.constrained.fptoui.nxv32i1.nxv32f16(<vscale x 32 x half>, metadata)
-define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
+define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -639,7 +639,7 @@ define <vscale x 32 x i1> @vfptoui_nxv32f16_nxv32i1(<vscale x 32 x half> %va) {
 }
 
 declare <vscale x 32 x i8> @llvm.experimental.constrained.fptosi.nxv32i8.nxv32f16(<vscale x 32 x half>, metadata)
-define <vscale x 32 x i8> @vfptosi_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
+define <vscale x 32 x i8> @vfptosi_nxv32f16_nxv32i8(<vscale x 32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -651,7 +651,7 @@ define <vscale x 32 x i8> @vfptosi_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
 }
 
 declare <vscale x 32 x i8> @llvm.experimental.constrained.fptoui.nxv32i8.nxv32f16(<vscale x 32 x half>, metadata)
-define <vscale x 32 x i8> @vfptoui_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
+define <vscale x 32 x i8> @vfptoui_nxv32f16_nxv32i8(<vscale x 32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -663,7 +663,7 @@ define <vscale x 32 x i8> @vfptoui_nxv32f16_nxv32i8(<vscale x 32 x half> %va) {
 }
 
 declare <vscale x 32 x i16> @llvm.experimental.constrained.fptosi.nxv32i16.nxv32f16(<vscale x 32 x half>, metadata)
-define <vscale x 32 x i16> @vfptosi_nxv32f16_nxv32i16(<vscale x 32 x half> %va) {
+define <vscale x 32 x i16> @vfptosi_nxv32f16_nxv32i16(<vscale x 32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -674,7 +674,7 @@ define <vscale x 32 x i16> @vfptosi_nxv32f16_nxv32i16(<vscale x 32 x half> %va)
 }
 
 declare <vscale x 32 x i16> @llvm.experimental.constrained.fptoui.nxv32i16.nxv32f16(<vscale x 32 x half>, metadata)
-define <vscale x 32 x i16> @vfptoui_nxv32f16_nxv32i16(<vscale x 32 x half> %va) {
+define <vscale x 32 x i16> @vfptoui_nxv32f16_nxv32i16(<vscale x 32 x half> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -685,7 +685,7 @@ define <vscale x 32 x i16> @vfptoui_nxv32f16_nxv32i16(<vscale x 32 x half> %va)
 }
 
 declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i1> @vfptosi_nxv1f32_nxv1i1(<vscale x 1 x float> %va) {
+define <vscale x 1 x i1> @vfptosi_nxv1f32_nxv1i1(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -698,7 +698,7 @@ define <vscale x 1 x i1> @vfptosi_nxv1f32_nxv1i1(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i1> @vfptoui_nxv1f32_nxv1i1(<vscale x 1 x float> %va) {
+define <vscale x 1 x i1> @vfptoui_nxv1f32_nxv1i1(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -711,7 +711,7 @@ define <vscale x 1 x i1> @vfptoui_nxv1f32_nxv1i1(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
+define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -724,7 +724,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
+define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -737,7 +737,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i16> @vfptosi_nxv1f32_nxv1i16(<vscale x 1 x float> %va) {
+define <vscale x 1 x i16> @vfptosi_nxv1f32_nxv1i16(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -749,7 +749,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f32_nxv1i16(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i16> @vfptoui_nxv1f32_nxv1i16(<vscale x 1 x float> %va) {
+define <vscale x 1 x i16> @vfptoui_nxv1f32_nxv1i16(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -761,7 +761,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f32_nxv1i16(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i32> @vfptosi_nxv1f32_nxv1i32(<vscale x 1 x float> %va) {
+define <vscale x 1 x i32> @vfptosi_nxv1f32_nxv1i32(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -772,7 +772,7 @@ define <vscale x 1 x i32> @vfptosi_nxv1f32_nxv1i32(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i32> @vfptoui_nxv1f32_nxv1i32(<vscale x 1 x float> %va) {
+define <vscale x 1 x i32> @vfptoui_nxv1f32_nxv1i32(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -783,7 +783,7 @@ define <vscale x 1 x i32> @vfptoui_nxv1f32_nxv1i32(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i64> @vfptosi_nxv1f32_nxv1i64(<vscale x 1 x float> %va) {
+define <vscale x 1 x i64> @vfptosi_nxv1f32_nxv1i64(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -795,7 +795,7 @@ define <vscale x 1 x i64> @vfptosi_nxv1f32_nxv1i64(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f32(<vscale x 1 x float>, metadata)
-define <vscale x 1 x i64> @vfptoui_nxv1f32_nxv1i64(<vscale x 1 x float> %va) {
+define <vscale x 1 x i64> @vfptoui_nxv1f32_nxv1i64(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -807,7 +807,7 @@ define <vscale x 1 x i64> @vfptoui_nxv1f32_nxv1i64(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i1> @vfptosi_nxv2f32_nxv2i1(<vscale x 2 x float> %va) {
+define <vscale x 2 x i1> @vfptosi_nxv2f32_nxv2i1(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -820,7 +820,7 @@ define <vscale x 2 x i1> @vfptosi_nxv2f32_nxv2i1(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i1> @vfptoui_nxv2f32_nxv2i1(<vscale x 2 x float> %va) {
+define <vscale x 2 x i1> @vfptoui_nxv2f32_nxv2i1(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -833,7 +833,7 @@ define <vscale x 2 x i1> @vfptoui_nxv2f32_nxv2i1(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
+define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -846,7 +846,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
+define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -859,7 +859,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i16> @vfptosi_nxv2f32_nxv2i16(<vscale x 2 x float> %va) {
+define <vscale x 2 x i16> @vfptosi_nxv2f32_nxv2i16(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -871,7 +871,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f32_nxv2i16(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i16> @vfptoui_nxv2f32_nxv2i16(<vscale x 2 x float> %va) {
+define <vscale x 2 x i16> @vfptoui_nxv2f32_nxv2i16(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -883,7 +883,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f32_nxv2i16(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i32> @vfptosi_nxv2f32_nxv2i32(<vscale x 2 x float> %va) {
+define <vscale x 2 x i32> @vfptosi_nxv2f32_nxv2i32(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -894,7 +894,7 @@ define <vscale x 2 x i32> @vfptosi_nxv2f32_nxv2i32(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i32> @vfptoui_nxv2f32_nxv2i32(<vscale x 2 x float> %va) {
+define <vscale x 2 x i32> @vfptoui_nxv2f32_nxv2i32(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -905,7 +905,7 @@ define <vscale x 2 x i32> @vfptoui_nxv2f32_nxv2i32(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i64> @vfptosi_nxv2f32_nxv2i64(<vscale x 2 x float> %va) {
+define <vscale x 2 x i64> @vfptosi_nxv2f32_nxv2i64(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -917,7 +917,7 @@ define <vscale x 2 x i64> @vfptosi_nxv2f32_nxv2i64(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f32(<vscale x 2 x float>, metadata)
-define <vscale x 2 x i64> @vfptoui_nxv2f32_nxv2i64(<vscale x 2 x float> %va) {
+define <vscale x 2 x i64> @vfptoui_nxv2f32_nxv2i64(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -929,7 +929,7 @@ define <vscale x 2 x i64> @vfptoui_nxv2f32_nxv2i64(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i1> @vfptosi_nxv4f32_nxv4i1(<vscale x 4 x float> %va) {
+define <vscale x 4 x i1> @vfptosi_nxv4f32_nxv4i1(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -942,7 +942,7 @@ define <vscale x 4 x i1> @vfptosi_nxv4f32_nxv4i1(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i1> @vfptoui_nxv4f32_nxv4i1(<vscale x 4 x float> %va) {
+define <vscale x 4 x i1> @vfptoui_nxv4f32_nxv4i1(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -955,7 +955,7 @@ define <vscale x 4 x i1> @vfptoui_nxv4f32_nxv4i1(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
+define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -968,7 +968,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
+define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -981,7 +981,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i16> @vfptosi_nxv4f32_nxv4i16(<vscale x 4 x float> %va) {
+define <vscale x 4 x i16> @vfptosi_nxv4f32_nxv4i16(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -993,7 +993,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f32_nxv4i16(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i16> @vfptoui_nxv4f32_nxv4i16(<vscale x 4 x float> %va) {
+define <vscale x 4 x i16> @vfptoui_nxv4f32_nxv4i16(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -1005,7 +1005,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f32_nxv4i16(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i32> @vfptosi_nxv4f32_nxv4i32(<vscale x 4 x float> %va) {
+define <vscale x 4 x i32> @vfptosi_nxv4f32_nxv4i32(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1016,7 +1016,7 @@ define <vscale x 4 x i32> @vfptosi_nxv4f32_nxv4i32(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i32> @vfptoui_nxv4f32_nxv4i32(<vscale x 4 x float> %va) {
+define <vscale x 4 x i32> @vfptoui_nxv4f32_nxv4i32(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1027,7 +1027,7 @@ define <vscale x 4 x i32> @vfptoui_nxv4f32_nxv4i32(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i64> @vfptosi_nxv4f32_nxv4i64(<vscale x 4 x float> %va) {
+define <vscale x 4 x i64> @vfptosi_nxv4f32_nxv4i64(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1039,7 +1039,7 @@ define <vscale x 4 x i64> @vfptosi_nxv4f32_nxv4i64(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f32(<vscale x 4 x float>, metadata)
-define <vscale x 4 x i64> @vfptoui_nxv4f32_nxv4i64(<vscale x 4 x float> %va) {
+define <vscale x 4 x i64> @vfptoui_nxv4f32_nxv4i64(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1051,7 +1051,7 @@ define <vscale x 4 x i64> @vfptoui_nxv4f32_nxv4i64(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i1> @vfptosi_nxv8f32_nxv8i1(<vscale x 8 x float> %va) {
+define <vscale x 8 x i1> @vfptosi_nxv8f32_nxv8i1(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1064,7 +1064,7 @@ define <vscale x 8 x i1> @vfptosi_nxv8f32_nxv8i1(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i1> @vfptoui_nxv8f32_nxv8i1(<vscale x 8 x float> %va) {
+define <vscale x 8 x i1> @vfptoui_nxv8f32_nxv8i1(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1077,7 +1077,7 @@ define <vscale x 8 x i1> @vfptoui_nxv8f32_nxv8i1(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
+define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1090,7 +1090,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
+define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1103,7 +1103,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i16> @vfptosi_nxv8f32_nxv8i16(<vscale x 8 x float> %va) {
+define <vscale x 8 x i16> @vfptosi_nxv8f32_nxv8i16(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1115,7 +1115,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f32_nxv8i16(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i16> @vfptoui_nxv8f32_nxv8i16(<vscale x 8 x float> %va) {
+define <vscale x 8 x i16> @vfptoui_nxv8f32_nxv8i16(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1127,7 +1127,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f32_nxv8i16(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i32> @vfptosi_nxv8f32_nxv8i32(<vscale x 8 x float> %va) {
+define <vscale x 8 x i32> @vfptosi_nxv8f32_nxv8i32(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1138,7 +1138,7 @@ define <vscale x 8 x i32> @vfptosi_nxv8f32_nxv8i32(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i32> @vfptoui_nxv8f32_nxv8i32(<vscale x 8 x float> %va) {
+define <vscale x 8 x i32> @vfptoui_nxv8f32_nxv8i32(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1149,7 +1149,7 @@ define <vscale x 8 x i32> @vfptoui_nxv8f32_nxv8i32(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i64> @vfptosi_nxv8f32_nxv8i64(<vscale x 8 x float> %va) {
+define <vscale x 8 x i64> @vfptosi_nxv8f32_nxv8i64(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1161,7 +1161,7 @@ define <vscale x 8 x i64> @vfptosi_nxv8f32_nxv8i64(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f32(<vscale x 8 x float>, metadata)
-define <vscale x 8 x i64> @vfptoui_nxv8f32_nxv8i64(<vscale x 8 x float> %va) {
+define <vscale x 8 x i64> @vfptoui_nxv8f32_nxv8i64(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1173,7 +1173,7 @@ define <vscale x 8 x i64> @vfptoui_nxv8f32_nxv8i64(<vscale x 8 x float> %va) {
 }
 
 declare <vscale x 16 x i1> @llvm.experimental.constrained.fptosi.nxv16i1.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i1> @vfptosi_nxv16f32_nxv16i1(<vscale x 16 x float> %va) {
+define <vscale x 16 x i1> @vfptosi_nxv16f32_nxv16i1(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1186,7 +1186,7 @@ define <vscale x 16 x i1> @vfptosi_nxv16f32_nxv16i1(<vscale x 16 x float> %va) {
 }
 
 declare <vscale x 16 x i1> @llvm.experimental.constrained.fptoui.nxv16i1.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i1> @vfptoui_nxv16f32_nxv16i1(<vscale x 16 x float> %va) {
+define <vscale x 16 x i1> @vfptoui_nxv16f32_nxv16i1(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1199,7 +1199,7 @@ define <vscale x 16 x i1> @vfptoui_nxv16f32_nxv16i1(<vscale x 16 x float> %va) {
 }
 
 declare <vscale x 16 x i8> @llvm.experimental.constrained.fptosi.nxv16i8.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
+define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1212,7 +1212,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
 }
 
 declare <vscale x 16 x i8> @llvm.experimental.constrained.fptoui.nxv16i8.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
+define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1225,7 +1225,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
 }
 
 declare <vscale x 16 x i16> @llvm.experimental.constrained.fptosi.nxv16i16.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i16> @vfptosi_nxv16f32_nxv16i16(<vscale x 16 x float> %va) {
+define <vscale x 16 x i16> @vfptosi_nxv16f32_nxv16i16(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1237,7 +1237,7 @@ define <vscale x 16 x i16> @vfptosi_nxv16f32_nxv16i16(<vscale x 16 x float> %va)
 }
 
 declare <vscale x 16 x i16> @llvm.experimental.constrained.fptoui.nxv16i16.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i16> @vfptoui_nxv16f32_nxv16i16(<vscale x 16 x float> %va) {
+define <vscale x 16 x i16> @vfptoui_nxv16f32_nxv16i16(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1249,7 +1249,7 @@ define <vscale x 16 x i16> @vfptoui_nxv16f32_nxv16i16(<vscale x 16 x float> %va)
 }
 
 declare <vscale x 16 x i32> @llvm.experimental.constrained.fptosi.nxv16i32.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i32> @vfptosi_nxv16f32_nxv16i32(<vscale x 16 x float> %va) {
+define <vscale x 16 x i32> @vfptosi_nxv16f32_nxv16i32(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -1260,7 +1260,7 @@ define <vscale x 16 x i32> @vfptosi_nxv16f32_nxv16i32(<vscale x 16 x float> %va)
 }
 
 declare <vscale x 16 x i32> @llvm.experimental.constrained.fptoui.nxv16i32.nxv16f32(<vscale x 16 x float>, metadata)
-define <vscale x 16 x i32> @vfptoui_nxv16f32_nxv16i32(<vscale x 16 x float> %va) {
+define <vscale x 16 x i32> @vfptoui_nxv16f32_nxv16i32(<vscale x 16 x float> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -1271,7 +1271,7 @@ define <vscale x 16 x i32> @vfptoui_nxv16f32_nxv16i32(<vscale x 16 x float> %va)
 }
 
 declare <vscale x 1 x i1> @llvm.experimental.constrained.fptosi.nxv1i1.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i1> @vfptosi_nxv1f64_nxv1i1(<vscale x 1 x double> %va) {
+define <vscale x 1 x i1> @vfptosi_nxv1f64_nxv1i1(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1284,7 +1284,7 @@ define <vscale x 1 x i1> @vfptosi_nxv1f64_nxv1i1(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i1> @llvm.experimental.constrained.fptoui.nxv1i1.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i1> @vfptoui_nxv1f64_nxv1i1(<vscale x 1 x double> %va) {
+define <vscale x 1 x i1> @vfptoui_nxv1f64_nxv1i1(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1297,7 +1297,7 @@ define <vscale x 1 x i1> @vfptoui_nxv1f64_nxv1i1(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i8> @llvm.experimental.constrained.fptosi.nxv1i8.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
+define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1312,7 +1312,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i8> @llvm.experimental.constrained.fptoui.nxv1i8.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
+define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1327,7 +1327,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i16> @llvm.experimental.constrained.fptosi.nxv1i16.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
+define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1340,7 +1340,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i16> @llvm.experimental.constrained.fptoui.nxv1i16.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
+define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1353,7 +1353,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i32> @vfptosi_nxv1f64_nxv1i32(<vscale x 1 x double> %va) {
+define <vscale x 1 x i32> @vfptosi_nxv1f64_nxv1i32(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1365,7 +1365,7 @@ define <vscale x 1 x i32> @vfptosi_nxv1f64_nxv1i32(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i32> @llvm.experimental.constrained.fptoui.nxv1i32.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i32> @vfptoui_nxv1f64_nxv1i32(<vscale x 1 x double> %va) {
+define <vscale x 1 x i32> @vfptoui_nxv1f64_nxv1i32(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1377,7 +1377,7 @@ define <vscale x 1 x i32> @vfptoui_nxv1f64_nxv1i32(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i64> @llvm.experimental.constrained.fptosi.nxv1i64.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i64> @vfptosi_nxv1f64_nxv1i64(<vscale x 1 x double> %va) {
+define <vscale x 1 x i64> @vfptosi_nxv1f64_nxv1i64(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -1388,7 +1388,7 @@ define <vscale x 1 x i64> @vfptosi_nxv1f64_nxv1i64(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 1 x i64> @llvm.experimental.constrained.fptoui.nxv1i64.nxv1f64(<vscale x 1 x double>, metadata)
-define <vscale x 1 x i64> @vfptoui_nxv1f64_nxv1i64(<vscale x 1 x double> %va) {
+define <vscale x 1 x i64> @vfptoui_nxv1f64_nxv1i64(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -1399,7 +1399,7 @@ define <vscale x 1 x i64> @vfptoui_nxv1f64_nxv1i64(<vscale x 1 x double> %va) {
 }
 
 declare <vscale x 2 x i1> @llvm.experimental.constrained.fptosi.nxv2i1.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i1> @vfptosi_nxv2f64_nxv2i1(<vscale x 2 x double> %va) {
+define <vscale x 2 x i1> @vfptosi_nxv2f64_nxv2i1(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1412,7 +1412,7 @@ define <vscale x 2 x i1> @vfptosi_nxv2f64_nxv2i1(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i1> @llvm.experimental.constrained.fptoui.nxv2i1.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i1> @vfptoui_nxv2f64_nxv2i1(<vscale x 2 x double> %va) {
+define <vscale x 2 x i1> @vfptoui_nxv2f64_nxv2i1(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1425,7 +1425,7 @@ define <vscale x 2 x i1> @vfptoui_nxv2f64_nxv2i1(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i8> @llvm.experimental.constrained.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
+define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1440,7 +1440,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i8> @llvm.experimental.constrained.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
+define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1455,7 +1455,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i16> @llvm.experimental.constrained.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
+define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1468,7 +1468,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i16> @llvm.experimental.constrained.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
+define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1481,7 +1481,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i32> @llvm.experimental.constrained.fptosi.nxv2i32.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i32> @vfptosi_nxv2f64_nxv2i32(<vscale x 2 x double> %va) {
+define <vscale x 2 x i32> @vfptosi_nxv2f64_nxv2i32(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1493,7 +1493,7 @@ define <vscale x 2 x i32> @vfptosi_nxv2f64_nxv2i32(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i32> @llvm.experimental.constrained.fptoui.nxv2i32.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i32> @vfptoui_nxv2f64_nxv2i32(<vscale x 2 x double> %va) {
+define <vscale x 2 x i32> @vfptoui_nxv2f64_nxv2i32(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1505,7 +1505,7 @@ define <vscale x 2 x i32> @vfptoui_nxv2f64_nxv2i32(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i64> @llvm.experimental.constrained.fptosi.nxv2i64.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i64> @vfptosi_nxv2f64_nxv2i64(<vscale x 2 x double> %va) {
+define <vscale x 2 x i64> @vfptosi_nxv2f64_nxv2i64(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -1516,7 +1516,7 @@ define <vscale x 2 x i64> @vfptosi_nxv2f64_nxv2i64(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 2 x i64> @llvm.experimental.constrained.fptoui.nxv2i64.nxv2f64(<vscale x 2 x double>, metadata)
-define <vscale x 2 x i64> @vfptoui_nxv2f64_nxv2i64(<vscale x 2 x double> %va) {
+define <vscale x 2 x i64> @vfptoui_nxv2f64_nxv2i64(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -1527,7 +1527,7 @@ define <vscale x 2 x i64> @vfptoui_nxv2f64_nxv2i64(<vscale x 2 x double> %va) {
 }
 
 declare <vscale x 4 x i1> @llvm.experimental.constrained.fptosi.nxv4i1.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i1> @vfptosi_nxv4f64_nxv4i1(<vscale x 4 x double> %va) {
+define <vscale x 4 x i1> @vfptosi_nxv4f64_nxv4i1(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1540,7 +1540,7 @@ define <vscale x 4 x i1> @vfptosi_nxv4f64_nxv4i1(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i1> @llvm.experimental.constrained.fptoui.nxv4i1.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i1> @vfptoui_nxv4f64_nxv4i1(<vscale x 4 x double> %va) {
+define <vscale x 4 x i1> @vfptoui_nxv4f64_nxv4i1(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1553,7 +1553,7 @@ define <vscale x 4 x i1> @vfptoui_nxv4f64_nxv4i1(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i8> @llvm.experimental.constrained.fptosi.nxv4i8.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
+define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1568,7 +1568,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i8> @llvm.experimental.constrained.fptoui.nxv4i8.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
+define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1583,7 +1583,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i16> @llvm.experimental.constrained.fptosi.nxv4i16.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
+define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1596,7 +1596,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i16> @llvm.experimental.constrained.fptoui.nxv4i16.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
+define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1609,7 +1609,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.constrained.fptosi.nxv4i32.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i32> @vfptosi_nxv4f64_nxv4i32(<vscale x 4 x double> %va) {
+define <vscale x 4 x i32> @vfptosi_nxv4f64_nxv4i32(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1621,7 +1621,7 @@ define <vscale x 4 x i32> @vfptosi_nxv4f64_nxv4i32(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i32> @llvm.experimental.constrained.fptoui.nxv4i32.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i32> @vfptoui_nxv4f64_nxv4i32(<vscale x 4 x double> %va) {
+define <vscale x 4 x i32> @vfptoui_nxv4f64_nxv4i32(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1633,7 +1633,7 @@ define <vscale x 4 x i32> @vfptoui_nxv4f64_nxv4i32(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i64> @llvm.experimental.constrained.fptosi.nxv4i64.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i64> @vfptosi_nxv4f64_nxv4i64(<vscale x 4 x double> %va) {
+define <vscale x 4 x i64> @vfptosi_nxv4f64_nxv4i64(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -1644,7 +1644,7 @@ define <vscale x 4 x i64> @vfptosi_nxv4f64_nxv4i64(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 4 x i64> @llvm.experimental.constrained.fptoui.nxv4i64.nxv4f64(<vscale x 4 x double>, metadata)
-define <vscale x 4 x i64> @vfptoui_nxv4f64_nxv4i64(<vscale x 4 x double> %va) {
+define <vscale x 4 x i64> @vfptoui_nxv4f64_nxv4i64(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -1655,7 +1655,7 @@ define <vscale x 4 x i64> @vfptoui_nxv4f64_nxv4i64(<vscale x 4 x double> %va) {
 }
 
 declare <vscale x 8 x i1> @llvm.experimental.constrained.fptosi.nxv8i1.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i1> @vfptosi_nxv8f64_nxv8i1(<vscale x 8 x double> %va) {
+define <vscale x 8 x i1> @vfptosi_nxv8f64_nxv8i1(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1668,7 +1668,7 @@ define <vscale x 8 x i1> @vfptosi_nxv8f64_nxv8i1(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i1> @llvm.experimental.constrained.fptoui.nxv8i1.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i1> @vfptoui_nxv8f64_nxv8i1(<vscale x 8 x double> %va) {
+define <vscale x 8 x i1> @vfptoui_nxv8f64_nxv8i1(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1681,7 +1681,7 @@ define <vscale x 8 x i1> @vfptoui_nxv8f64_nxv8i1(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i8> @llvm.experimental.constrained.fptosi.nxv8i8.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
+define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1696,7 +1696,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i8> @llvm.experimental.constrained.fptoui.nxv8i8.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
+define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1711,7 +1711,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i16> @llvm.experimental.constrained.fptosi.nxv8i16.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
+define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1724,7 +1724,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i16> @llvm.experimental.constrained.fptoui.nxv8i16.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
+define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1737,7 +1737,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i32> @llvm.experimental.constrained.fptosi.nxv8i32.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i32> @vfptosi_nxv8f64_nxv8i32(<vscale x 8 x double> %va) {
+define <vscale x 8 x i32> @vfptosi_nxv8f64_nxv8i32(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1749,7 +1749,7 @@ define <vscale x 8 x i32> @vfptosi_nxv8f64_nxv8i32(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i32> @llvm.experimental.constrained.fptoui.nxv8i32.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i32> @vfptoui_nxv8f64_nxv8i32(<vscale x 8 x double> %va) {
+define <vscale x 8 x i32> @vfptoui_nxv8f64_nxv8i32(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1761,7 +1761,7 @@ define <vscale x 8 x i32> @vfptoui_nxv8f64_nxv8i32(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i64> @llvm.experimental.constrained.fptosi.nxv8i64.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i64> @vfptosi_nxv8f64_nxv8i64(<vscale x 8 x double> %va) {
+define <vscale x 8 x i64> @vfptosi_nxv8f64_nxv8i64(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptosi_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -1772,7 +1772,7 @@ define <vscale x 8 x i64> @vfptosi_nxv8f64_nxv8i64(<vscale x 8 x double> %va) {
 }
 
 declare <vscale x 8 x i64> @llvm.experimental.constrained.fptoui.nxv8i64.nxv8f64(<vscale x 8 x double>, metadata)
-define <vscale x 8 x i64> @vfptoui_nxv8f64_nxv8i64(<vscale x 8 x double> %va) {
+define <vscale x 8 x i64> @vfptoui_nxv8f64_nxv8i64(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptoui_nxv8f64_nxv8i64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

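For reference, a minimal sketch of the rule the fp-to-int tests above follow, with a hypothetical function name: per the LangRef, the constrained fptosi/fptoui intrinsics take a single exception-behavior metadata operand, and any function that calls a constrained intrinsic must itself be marked strictfp.

; Sketch only: @example_fptosi is hypothetical, not part of this patch.
declare <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float>, metadata)
define <vscale x 1 x i32> @example_fptosi(<vscale x 1 x float> %va) strictfp {
  ; "fpexcept.strict" requires precise FP exception semantics.
  %r = call <vscale x 1 x i32> @llvm.experimental.constrained.fptosi.nxv1i32.nxv1f32(<vscale x 1 x float> %va, metadata !"fpexcept.strict")
  ret <vscale x 1 x i32> %r
}
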
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll
index d8a82098a99075..72bf2b94e6f9f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(<vscale x 1 x double>, metadata, metadata)
-define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va) {
+define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -17,7 +17,7 @@ define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va)
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f64(<vscale x 1 x double>, metadata, metadata)
-define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va) {
+define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -30,7 +30,7 @@ define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va)
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fptrunc.nxv1f16.nxv1f32(<vscale x 1 x float>, metadata, metadata)
-define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) {
+define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -42,7 +42,7 @@ define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fptrunc.nxv2f32.nxv2f64(<vscale x 2 x double>, metadata, metadata)
-define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va) {
+define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -54,7 +54,7 @@ define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va)
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f64(<vscale x 2 x double>, metadata, metadata)
-define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va) {
+define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -67,7 +67,7 @@ define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va)
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fptrunc.nxv2f16.nxv2f32(<vscale x 2 x float>, metadata, metadata)
-define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) {
+define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -79,7 +79,7 @@ define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fptrunc.nxv4f32.nxv4f64(<vscale x 4 x double>, metadata, metadata)
-define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va) {
+define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -91,7 +91,7 @@ define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va)
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f64(<vscale x 4 x double>, metadata, metadata)
-define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va) {
+define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -104,7 +104,7 @@ define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va)
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fptrunc.nxv4f16.nxv4f32(<vscale x 4 x float>, metadata, metadata)
-define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) {
+define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -116,7 +116,7 @@ define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fptrunc.nxv8f32.nxv8f64(<vscale x 8 x double>, metadata, metadata)
-define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va) {
+define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -128,7 +128,7 @@ define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va)
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f64(<vscale x 8 x double>, metadata, metadata)
-define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va) {
+define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -141,7 +141,7 @@ define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va)
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fptrunc.nxv8f16.nxv8f32(<vscale x 8 x float>, metadata, metadata)
-define <vscale x 8 x half> @vfptrunc_nxv8f32_nxv8f16(<vscale x 8 x float> %va) {
+define <vscale x 8 x half> @vfptrunc_nxv8f32_nxv8f16(<vscale x 8 x float> %va) strictfp {
 ; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma

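Conversions that can round, such as the constrained fptrunc exercised above, take both a rounding-mode and an exception-behavior metadata operand; the strictfp requirement on the enclosing function is the same. A minimal sketch under the same assumptions (hypothetical function name; declaration as in the test above):

; Sketch only: @example_fptrunc is hypothetical, not part of this patch.
declare <vscale x 1 x float> @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(<vscale x 1 x double>, metadata, metadata)
define <vscale x 1 x float> @example_fptrunc(<vscale x 1 x double> %va) strictfp {
  ; "round.dynamic" defers to the runtime rounding mode;
  ; "fpexcept.strict" keeps exceptions precise.
  %r = call <vscale x 1 x float> @llvm.experimental.constrained.fptrunc.nxv1f32.nxv1f64(<vscale x 1 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %r
}
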
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
index 0f2f855182ba93..09c39e95fb1d48 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-constrained-sdnode.ll
@@ -6,7 +6,7 @@
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sqrt.nxv1f16(<vscale x 1 x half>, metadata, metadata)
 
-define <vscale x 1 x half> @vfsqrt_nxv1f16(<vscale x 1 x half> %v) {
+define <vscale x 1 x half> @vfsqrt_nxv1f16(<vscale x 1 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -18,7 +18,7 @@ define <vscale x 1 x half> @vfsqrt_nxv1f16(<vscale x 1 x half> %v) {
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.sqrt.nxv2f16(<vscale x 2 x half>, metadata, metadata)
 
-define <vscale x 2 x half> @vfsqrt_nxv2f16(<vscale x 2 x half> %v) {
+define <vscale x 2 x half> @vfsqrt_nxv2f16(<vscale x 2 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -30,7 +30,7 @@ define <vscale x 2 x half> @vfsqrt_nxv2f16(<vscale x 2 x half> %v) {
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.sqrt.nxv4f16(<vscale x 4 x half>, metadata, metadata)
 
-define <vscale x 4 x half> @vfsqrt_nxv4f16(<vscale x 4 x half> %v) {
+define <vscale x 4 x half> @vfsqrt_nxv4f16(<vscale x 4 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -42,7 +42,7 @@ define <vscale x 4 x half> @vfsqrt_nxv4f16(<vscale x 4 x half> %v) {
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.sqrt.nxv8f16(<vscale x 8 x half>, metadata, metadata)
 
-define <vscale x 8 x half> @vfsqrt_nxv8f16(<vscale x 8 x half> %v) {
+define <vscale x 8 x half> @vfsqrt_nxv8f16(<vscale x 8 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -54,7 +54,7 @@ define <vscale x 8 x half> @vfsqrt_nxv8f16(<vscale x 8 x half> %v) {
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.sqrt.nxv16f16(<vscale x 16 x half>, metadata, metadata)
 
-define <vscale x 16 x half> @vfsqrt_nxv16f16(<vscale x 16 x half> %v) {
+define <vscale x 16 x half> @vfsqrt_nxv16f16(<vscale x 16 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -66,7 +66,7 @@ define <vscale x 16 x half> @vfsqrt_nxv16f16(<vscale x 16 x half> %v) {
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.sqrt.nxv32f16(<vscale x 32 x half>, metadata, metadata)
 
-define <vscale x 32 x half> @vfsqrt_nxv32f16(<vscale x 32 x half> %v) {
+define <vscale x 32 x half> @vfsqrt_nxv32f16(<vscale x 32 x half> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -78,7 +78,7 @@ define <vscale x 32 x half> @vfsqrt_nxv32f16(<vscale x 32 x half> %v) {
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.sqrt.nxv1f32(<vscale x 1 x float>, metadata, metadata)
 
-define <vscale x 1 x float> @vfsqrt_nxv1f32(<vscale x 1 x float> %v) {
+define <vscale x 1 x float> @vfsqrt_nxv1f32(<vscale x 1 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -90,7 +90,7 @@ define <vscale x 1 x float> @vfsqrt_nxv1f32(<vscale x 1 x float> %v) {
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.sqrt.nxv2f32(<vscale x 2 x float>, metadata, metadata)
 
-define <vscale x 2 x float> @vfsqrt_nxv2f32(<vscale x 2 x float> %v) {
+define <vscale x 2 x float> @vfsqrt_nxv2f32(<vscale x 2 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -102,7 +102,7 @@ define <vscale x 2 x float> @vfsqrt_nxv2f32(<vscale x 2 x float> %v) {
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.sqrt.nxv4f32(<vscale x 4 x float>, metadata, metadata)
 
-define <vscale x 4 x float> @vfsqrt_nxv4f32(<vscale x 4 x float> %v) {
+define <vscale x 4 x float> @vfsqrt_nxv4f32(<vscale x 4 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -114,7 +114,7 @@ define <vscale x 4 x float> @vfsqrt_nxv4f32(<vscale x 4 x float> %v) {
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.sqrt.nxv8f32(<vscale x 8 x float>, metadata, metadata)
 
-define <vscale x 8 x float> @vfsqrt_nxv8f32(<vscale x 8 x float> %v) {
+define <vscale x 8 x float> @vfsqrt_nxv8f32(<vscale x 8 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -126,7 +126,7 @@ define <vscale x 8 x float> @vfsqrt_nxv8f32(<vscale x 8 x float> %v) {
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.sqrt.nxv16f32(<vscale x 16 x float>, metadata, metadata)
 
-define <vscale x 16 x float> @vfsqrt_nxv16f32(<vscale x 16 x float> %v) {
+define <vscale x 16 x float> @vfsqrt_nxv16f32(<vscale x 16 x float> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -138,7 +138,7 @@ define <vscale x 16 x float> @vfsqrt_nxv16f32(<vscale x 16 x float> %v) {
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.sqrt.nxv1f64(<vscale x 1 x double>, metadata, metadata)
 
-define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) {
+define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -150,7 +150,7 @@ define <vscale x 1 x double> @vfsqrt_nxv1f64(<vscale x 1 x double> %v) {
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.sqrt.nxv2f64(<vscale x 2 x double>, metadata, metadata)
 
-define <vscale x 2 x double> @vfsqrt_nxv2f64(<vscale x 2 x double> %v) {
+define <vscale x 2 x double> @vfsqrt_nxv2f64(<vscale x 2 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -162,7 +162,7 @@ define <vscale x 2 x double> @vfsqrt_nxv2f64(<vscale x 2 x double> %v) {
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.sqrt.nxv4f64(<vscale x 4 x double>, metadata, metadata)
 
-define <vscale x 4 x double> @vfsqrt_nxv4f64(<vscale x 4 x double> %v) {
+define <vscale x 4 x double> @vfsqrt_nxv4f64(<vscale x 4 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -174,7 +174,7 @@ define <vscale x 4 x double> @vfsqrt_nxv4f64(<vscale x 4 x double> %v) {
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.sqrt.nxv8f64(<vscale x 8 x double>, metadata, metadata)
 
-define <vscale x 8 x double> @vfsqrt_nxv8f64(<vscale x 8 x double> %v) {
+define <vscale x 8 x double> @vfsqrt_nxv8f64(<vscale x 8 x double> %v) strictfp {
 ; CHECK-LABEL: vfsqrt_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma

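The binary arithmetic intrinsics below follow the same two-metadata convention; a minimal sketch of the vector-vector fsub form (hypothetical function name; declaration taken from the test below):

; Sketch only: @example_fsub is hypothetical, not part of this patch.
declare <vscale x 1 x half> @llvm.experimental.constrained.fsub.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
define <vscale x 1 x half> @example_fsub(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) strictfp {
  ; Rounding mode and exception behavior are passed as metadata operands.
  %r = call <vscale x 1 x half> @llvm.experimental.constrained.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %r
}
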
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll
index fcbe5b831e4542..7a4d9d35a03b1c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.fsub.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)
-define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) {
+define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -16,7 +16,7 @@ entry:
   ret <vscale x 1 x half> %vc
 }
 
-define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
+define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -29,7 +29,7 @@ define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.fsub.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)
-define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) {
+define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -40,7 +40,7 @@ entry:
   ret <vscale x 2 x half> %vc
 }
 
-define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
+define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -53,7 +53,7 @@ define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.fsub.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)
-define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) {
+define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -64,7 +64,7 @@ entry:
   ret <vscale x 4 x half> %vc
 }
 
-define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
+define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -77,7 +77,7 @@ define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.fsub.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)
-define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) {
+define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -88,7 +88,7 @@ entry:
   ret <vscale x 8 x half> %vc
 }
 
-define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
+define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -100,7 +100,7 @@ define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
   ret <vscale x 8 x half> %vc
 }
 
-define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
+define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_fv_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -113,7 +113,7 @@ define <vscale x 8 x half> @vfsub_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.fsub.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)
-define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) {
+define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -124,7 +124,7 @@ entry:
   ret <vscale x 16 x half> %vc
 }
 
-define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) {
+define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -137,7 +137,7 @@ define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.fsub.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)
-define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) {
+define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -148,7 +148,7 @@ entry:
   ret <vscale x 32 x half> %vc
 }
 
-define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) {
+define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -161,7 +161,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)
-define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) {
+define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -172,7 +172,7 @@ entry:
   ret <vscale x 1 x float> %vc
 }
 
-define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b) {
+define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -185,7 +185,7 @@ define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.fsub.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)
-define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) {
+define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -196,7 +196,7 @@ entry:
   ret <vscale x 2 x float> %vc
 }
 
-define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b) {
+define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -209,7 +209,7 @@ define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.fsub.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)
-define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) {
+define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -220,7 +220,7 @@ entry:
   ret <vscale x 4 x float> %vc
 }
 
-define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b) {
+define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -233,7 +233,7 @@ define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.fsub.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)
-define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) {
+define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -244,7 +244,7 @@ entry:
   ret <vscale x 8 x float> %vc
 }
 
-define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b) {
+define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -256,7 +256,7 @@ define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b
   ret <vscale x 8 x float> %vc
 }
 
-define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b) {
+define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_fv_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -269,7 +269,7 @@ define <vscale x 8 x float> @vfsub_fv_nxv8f32(<vscale x 8 x float> %va, float %b
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.fsub.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)
-define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) {
+define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -280,7 +280,7 @@ entry:
   ret <vscale x 16 x float> %vc
 }
 
-define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b) {
+define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -293,7 +293,7 @@ define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.fsub.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)
-define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) {
+define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -304,7 +304,7 @@ entry:
   ret <vscale x 1 x double> %vc
 }
 
-define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b) {
+define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)
-define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) {
+define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -328,7 +328,7 @@ entry:
   ret <vscale x 2 x double> %vc
 }
 
-define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b) {
+define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -341,7 +341,7 @@ define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.fsub.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)
-define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) {
+define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -352,7 +352,7 @@ entry:
   ret <vscale x 4 x double> %vc
 }
 
-define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b) {
+define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -365,7 +365,7 @@ define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)
-define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) {
+define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb) strictfp {
 ; CHECK-LABEL: vfsub_vv_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -376,7 +376,7 @@ entry:
   ret <vscale x 8 x double> %vc
 }
 
-define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b) {
+define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_vf_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -388,7 +388,7 @@ define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double
   ret <vscale x 8 x double> %vc
 }
 
-define <vscale x 8 x double> @vfsub_fv_nxv8f64(<vscale x 8 x double> %va, double %b) {
+define <vscale x 8 x double> @vfsub_fv_nxv8f64(<vscale x 8 x double> %va, double %b) strictfp {
 ; CHECK-LABEL: vfsub_fv_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
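
For reference, the pattern these definitions follow after the change, shown as a minimal LLVM IR sketch (the function name @example_fsub is illustrative, not from the patch): the strictfp attribute sits on the function definition, and the call to the constrained intrinsic carries no per-call marker of its own.

declare <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)

; strictfp on the definition satisfies the LangRef rule for functions that
; contain constrained intrinsics; the metadata operands select dynamic
; rounding and strict exception semantics, matching the tests above.
define <vscale x 1 x float> @example_fsub(<vscale x 1 x float> %a, <vscale x 1 x float> %b) strictfp {
entry:
  %r = call <vscale x 1 x float> @llvm.experimental.constrained.fsub.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %r
}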

diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll
index 35fa4139a41ff7..90e5f58a603a59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-constrained-sdnode.ll
@@ -5,7 +5,7 @@
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
-define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
+define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -18,7 +18,7 @@ define <vscale x 1 x half> @vsitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
-define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
+define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -31,7 +31,7 @@ define <vscale x 1 x half> @vuitofp_nxv1i1_nxv1f16(<vscale x 1 x i1> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
-define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
+define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -44,7 +44,7 @@ define <vscale x 1 x float> @vsitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
-define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
+define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -57,7 +57,7 @@ define <vscale x 1 x float> @vuitofp_nxv1i1_nxv1f32(<vscale x 1 x i1> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
-define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
+define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i1_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -70,7 +70,7 @@ define <vscale x 1 x double> @vsitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i1(<vscale x 1 x i1>, metadata, metadata)
-define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
+define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i1_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -83,7 +83,7 @@ define <vscale x 1 x double> @vuitofp_nxv1i1_nxv1f64(<vscale x 1 x i1> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
-define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
+define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -96,7 +96,7 @@ define <vscale x 2 x half> @vsitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
-define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
+define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -109,7 +109,7 @@ define <vscale x 2 x half> @vuitofp_nxv2i1_nxv2f16(<vscale x 2 x i1> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
-define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
+define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -122,7 +122,7 @@ define <vscale x 2 x float> @vsitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
-define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
+define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -135,7 +135,7 @@ define <vscale x 2 x float> @vuitofp_nxv2i1_nxv2f32(<vscale x 2 x i1> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
-define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
+define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i1_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -148,7 +148,7 @@ define <vscale x 2 x double> @vsitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i1(<vscale x 2 x i1>, metadata, metadata)
-define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
+define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i1_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -161,7 +161,7 @@ define <vscale x 2 x double> @vuitofp_nxv2i1_nxv2f64(<vscale x 2 x i1> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
-define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
+define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -174,7 +174,7 @@ define <vscale x 4 x half> @vsitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
-define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
+define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -187,7 +187,7 @@ define <vscale x 4 x half> @vuitofp_nxv4i1_nxv4f16(<vscale x 4 x i1> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
-define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
+define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -200,7 +200,7 @@ define <vscale x 4 x float> @vsitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
-define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
+define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -213,7 +213,7 @@ define <vscale x 4 x float> @vuitofp_nxv4i1_nxv4f32(<vscale x 4 x i1> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
-define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
+define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i1_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -226,7 +226,7 @@ define <vscale x 4 x double> @vsitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i1(<vscale x 4 x i1>, metadata, metadata)
-define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
+define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i1_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -239,7 +239,7 @@ define <vscale x 4 x double> @vuitofp_nxv4i1_nxv4f64(<vscale x 4 x i1> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
-define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
+define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -252,7 +252,7 @@ define <vscale x 8 x half> @vsitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
-define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
+define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -265,7 +265,7 @@ define <vscale x 8 x half> @vuitofp_nxv8i1_nxv8f16(<vscale x 8 x i1> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
-define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
+define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -278,7 +278,7 @@ define <vscale x 8 x float> @vsitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
-define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
+define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -291,7 +291,7 @@ define <vscale x 8 x float> @vuitofp_nxv8i1_nxv8f32(<vscale x 8 x i1> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
-define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
+define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i1_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -304,7 +304,7 @@ define <vscale x 8 x double> @vsitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i1(<vscale x 8 x i1>, metadata, metadata)
-define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
+define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i1_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -317,7 +317,7 @@ define <vscale x 8 x double> @vuitofp_nxv8i1_nxv8f64(<vscale x 8 x i1> %va) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
-define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
+define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -330,7 +330,7 @@ define <vscale x 16 x half> @vsitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
-define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
+define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -343,7 +343,7 @@ define <vscale x 16 x half> @vuitofp_nxv16i1_nxv16f16(<vscale x 16 x i1> %va) {
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
-define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
+define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i1_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -356,7 +356,7 @@ define <vscale x 16 x float> @vsitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i1(<vscale x 16 x i1>, metadata, metadata)
-define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
+define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i1_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -369,7 +369,7 @@ define <vscale x 16 x float> @vuitofp_nxv16i1_nxv16f32(<vscale x 16 x i1> %va) {
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
-define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
+define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv32i1_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -382,7 +382,7 @@ define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i1(<vscale x 32 x i1>, metadata, metadata)
-define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
+define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv32i1_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -395,7 +395,7 @@ define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
-define <vscale x 1 x half> @vsitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
+define <vscale x 1 x half> @vsitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -407,7 +407,7 @@ define <vscale x 1 x half> @vsitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
-define <vscale x 1 x half> @vsitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) {
+define <vscale x 1 x half> @vsitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i7_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -420,7 +420,7 @@ define <vscale x 1 x half> @vsitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i7(<vscale x 1 x i7>, metadata, metadata)
-define <vscale x 1 x half> @vuitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) {
+define <vscale x 1 x half> @vuitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i7_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a0, 127
@@ -433,7 +433,7 @@ define <vscale x 1 x half> @vuitofp_nxv1i7_nxv1f16(<vscale x 1 x i7> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
-define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
+define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
@@ -445,7 +445,7 @@ define <vscale x 1 x half> @vuitofp_nxv1i8_nxv1f16(<vscale x 1 x i8> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
-define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
+define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -457,7 +457,7 @@ define <vscale x 1 x float> @vsitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
-define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
+define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -469,7 +469,7 @@ define <vscale x 1 x float> @vuitofp_nxv1i8_nxv1f32(<vscale x 1 x i8> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
-define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
+define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i8_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -481,7 +481,7 @@ define <vscale x 1 x double> @vsitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i8(<vscale x 1 x i8>, metadata, metadata)
-define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
+define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i8_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -493,7 +493,7 @@ define <vscale x 1 x double> @vuitofp_nxv1i8_nxv1f64(<vscale x 1 x i8> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
-define <vscale x 2 x half> @vsitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
+define <vscale x 2 x half> @vsitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -505,7 +505,7 @@ define <vscale x 2 x half> @vsitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
-define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
+define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
@@ -517,7 +517,7 @@ define <vscale x 2 x half> @vuitofp_nxv2i8_nxv2f16(<vscale x 2 x i8> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
-define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
+define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -529,7 +529,7 @@ define <vscale x 2 x float> @vsitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
-define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
+define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -541,7 +541,7 @@ define <vscale x 2 x float> @vuitofp_nxv2i8_nxv2f32(<vscale x 2 x i8> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
-define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
+define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i8_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -553,7 +553,7 @@ define <vscale x 2 x double> @vsitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i8(<vscale x 2 x i8>, metadata, metadata)
-define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
+define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i8_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -565,7 +565,7 @@ define <vscale x 2 x double> @vuitofp_nxv2i8_nxv2f64(<vscale x 2 x i8> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
-define <vscale x 4 x half> @vsitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
+define <vscale x 4 x half> @vsitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -577,7 +577,7 @@ define <vscale x 4 x half> @vsitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
-define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
+define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
@@ -589,7 +589,7 @@ define <vscale x 4 x half> @vuitofp_nxv4i8_nxv4f16(<vscale x 4 x i8> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
-define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
+define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -601,7 +601,7 @@ define <vscale x 4 x float> @vsitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
-define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
+define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -613,7 +613,7 @@ define <vscale x 4 x float> @vuitofp_nxv4i8_nxv4f32(<vscale x 4 x i8> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
-define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
+define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i8_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -625,7 +625,7 @@ define <vscale x 4 x double> @vsitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i8(<vscale x 4 x i8>, metadata, metadata)
-define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
+define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i8_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -637,7 +637,7 @@ define <vscale x 4 x double> @vuitofp_nxv4i8_nxv4f64(<vscale x 4 x i8> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
-define <vscale x 8 x half> @vsitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
+define <vscale x 8 x half> @vsitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -649,7 +649,7 @@ define <vscale x 8 x half> @vsitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
-define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
+define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
@@ -661,7 +661,7 @@ define <vscale x 8 x half> @vuitofp_nxv8i8_nxv8f16(<vscale x 8 x i8> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
-define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
+define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -673,7 +673,7 @@ define <vscale x 8 x float> @vsitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
-define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
+define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -685,7 +685,7 @@ define <vscale x 8 x float> @vuitofp_nxv8i8_nxv8f32(<vscale x 8 x i8> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
-define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
+define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i8_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -697,7 +697,7 @@ define <vscale x 8 x double> @vsitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i8(<vscale x 8 x i8>, metadata, metadata)
-define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
+define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i8_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -709,7 +709,7 @@ define <vscale x 8 x double> @vuitofp_nxv8i8_nxv8f64(<vscale x 8 x i8> %va) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
-define <vscale x 16 x half> @vsitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
+define <vscale x 16 x half> @vsitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -721,7 +721,7 @@ define <vscale x 16 x half> @vsitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
-define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
+define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
@@ -733,7 +733,7 @@ define <vscale x 16 x half> @vuitofp_nxv16i8_nxv16f16(<vscale x 16 x i8> %va) {
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
-define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
+define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i8_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -745,7 +745,7 @@ define <vscale x 16 x float> @vsitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i8(<vscale x 16 x i8>, metadata, metadata)
-define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
+define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i8_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -757,7 +757,7 @@ define <vscale x 16 x float> @vuitofp_nxv16i8_nxv16f32(<vscale x 16 x i8> %va) {
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
-define <vscale x 32 x half> @vsitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) {
+define <vscale x 32 x half> @vsitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -769,7 +769,7 @@ define <vscale x 32 x half> @vsitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) {
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i8(<vscale x 32 x i8>, metadata, metadata)
-define <vscale x 32 x half> @vuitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) {
+define <vscale x 32 x half> @vuitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv32i8_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
@@ -781,7 +781,7 @@ define <vscale x 32 x half> @vuitofp_nxv32i8_nxv32f16(<vscale x 32 x i8> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
-define <vscale x 1 x half> @vsitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) {
+define <vscale x 1 x half> @vsitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -792,7 +792,7 @@ define <vscale x 1 x half> @vsitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
-define <vscale x 1 x half> @vuitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) {
+define <vscale x 1 x half> @vuitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -803,7 +803,7 @@ define <vscale x 1 x half> @vuitofp_nxv1i16_nxv1f16(<vscale x 1 x i16> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
-define <vscale x 1 x float> @vsitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
+define <vscale x 1 x float> @vsitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -815,7 +815,7 @@ define <vscale x 1 x float> @vsitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
-define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
+define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -827,7 +827,7 @@ define <vscale x 1 x float> @vuitofp_nxv1i16_nxv1f32(<vscale x 1 x i16> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
-define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
+define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i16_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -839,7 +839,7 @@ define <vscale x 1 x double> @vsitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i16(<vscale x 1 x i16>, metadata, metadata)
-define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
+define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i16_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -851,7 +851,7 @@ define <vscale x 1 x double> @vuitofp_nxv1i16_nxv1f64(<vscale x 1 x i16> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
-define <vscale x 2 x half> @vsitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) {
+define <vscale x 2 x half> @vsitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -862,7 +862,7 @@ define <vscale x 2 x half> @vsitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
-define <vscale x 2 x half> @vuitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) {
+define <vscale x 2 x half> @vuitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -873,7 +873,7 @@ define <vscale x 2 x half> @vuitofp_nxv2i16_nxv2f16(<vscale x 2 x i16> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
-define <vscale x 2 x float> @vsitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
+define <vscale x 2 x float> @vsitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -885,7 +885,7 @@ define <vscale x 2 x float> @vsitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
-define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
+define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -897,7 +897,7 @@ define <vscale x 2 x float> @vuitofp_nxv2i16_nxv2f32(<vscale x 2 x i16> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
-define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
+define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i16_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -909,7 +909,7 @@ define <vscale x 2 x double> @vsitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i16(<vscale x 2 x i16>, metadata, metadata)
-define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
+define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i16_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -921,7 +921,7 @@ define <vscale x 2 x double> @vuitofp_nxv2i16_nxv2f64(<vscale x 2 x i16> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
-define <vscale x 4 x half> @vsitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) {
+define <vscale x 4 x half> @vsitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -932,7 +932,7 @@ define <vscale x 4 x half> @vsitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
-define <vscale x 4 x half> @vuitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) {
+define <vscale x 4 x half> @vuitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -943,7 +943,7 @@ define <vscale x 4 x half> @vuitofp_nxv4i16_nxv4f16(<vscale x 4 x i16> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
-define <vscale x 4 x float> @vsitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
+define <vscale x 4 x float> @vsitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -955,7 +955,7 @@ define <vscale x 4 x float> @vsitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
-define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
+define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -967,7 +967,7 @@ define <vscale x 4 x float> @vuitofp_nxv4i16_nxv4f32(<vscale x 4 x i16> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
-define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
+define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i16_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -979,7 +979,7 @@ define <vscale x 4 x double> @vsitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i16(<vscale x 4 x i16>, metadata, metadata)
-define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
+define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i16_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -991,7 +991,7 @@ define <vscale x 4 x double> @vuitofp_nxv4i16_nxv4f64(<vscale x 4 x i16> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
-define <vscale x 8 x half> @vsitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) {
+define <vscale x 8 x half> @vsitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1002,7 +1002,7 @@ define <vscale x 8 x half> @vsitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
-define <vscale x 8 x half> @vuitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) {
+define <vscale x 8 x half> @vuitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1013,7 +1013,7 @@ define <vscale x 8 x half> @vuitofp_nxv8i16_nxv8f16(<vscale x 8 x i16> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
-define <vscale x 8 x float> @vsitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
+define <vscale x 8 x float> @vsitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1025,7 +1025,7 @@ define <vscale x 8 x float> @vsitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
-define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
+define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1037,7 +1037,7 @@ define <vscale x 8 x float> @vuitofp_nxv8i16_nxv8f32(<vscale x 8 x i16> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
-define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
+define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i16_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1049,7 +1049,7 @@ define <vscale x 8 x double> @vsitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i16(<vscale x 8 x i16>, metadata, metadata)
-define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
+define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i16_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1061,7 +1061,7 @@ define <vscale x 8 x double> @vuitofp_nxv8i16_nxv8f64(<vscale x 8 x i16> %va) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
-define <vscale x 16 x half> @vsitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) {
+define <vscale x 16 x half> @vsitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1072,7 +1072,7 @@ define <vscale x 16 x half> @vsitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va)
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
-define <vscale x 16 x half> @vuitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) {
+define <vscale x 16 x half> @vuitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1083,7 +1083,7 @@ define <vscale x 16 x half> @vuitofp_nxv16i16_nxv16f16(<vscale x 16 x i16> %va)
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
-define <vscale x 16 x float> @vsitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) {
+define <vscale x 16 x float> @vsitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1095,7 +1095,7 @@ define <vscale x 16 x float> @vsitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va)
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i16(<vscale x 16 x i16>, metadata, metadata)
-define <vscale x 16 x float> @vuitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) {
+define <vscale x 16 x float> @vuitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i16_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1107,7 +1107,7 @@ define <vscale x 16 x float> @vuitofp_nxv16i16_nxv16f32(<vscale x 16 x i16> %va)
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.sitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
-define <vscale x 32 x half> @vsitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) {
+define <vscale x 32 x half> @vsitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -1118,7 +1118,7 @@ define <vscale x 32 x half> @vsitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va)
 }
 
 declare <vscale x 32 x half> @llvm.experimental.constrained.uitofp.nxv32f16.nxv32i16(<vscale x 32 x i16>, metadata, metadata)
-define <vscale x 32 x half> @vuitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) {
+define <vscale x 32 x half> @vuitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv32i16_nxv32f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
@@ -1129,7 +1129,7 @@ define <vscale x 32 x half> @vuitofp_nxv32i16_nxv32f16(<vscale x 32 x i16> %va)
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
-define <vscale x 1 x half> @vsitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) {
+define <vscale x 1 x half> @vsitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -1141,7 +1141,7 @@ define <vscale x 1 x half> @vsitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
-define <vscale x 1 x half> @vuitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) {
+define <vscale x 1 x half> @vuitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
@@ -1153,7 +1153,7 @@ define <vscale x 1 x half> @vuitofp_nxv1i32_nxv1f16(<vscale x 1 x i32> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
-define <vscale x 1 x float> @vsitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) {
+define <vscale x 1 x float> @vsitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1164,7 +1164,7 @@ define <vscale x 1 x float> @vsitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
-define <vscale x 1 x float> @vuitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) {
+define <vscale x 1 x float> @vuitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1175,7 +1175,7 @@ define <vscale x 1 x float> @vuitofp_nxv1i32_nxv1f32(<vscale x 1 x i32> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
-define <vscale x 1 x double> @vsitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) {
+define <vscale x 1 x double> @vsitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1187,7 +1187,7 @@ define <vscale x 1 x double> @vsitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i32(<vscale x 1 x i32>, metadata, metadata)
-define <vscale x 1 x double> @vuitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) {
+define <vscale x 1 x double> @vuitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i32_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1199,7 +1199,7 @@ define <vscale x 1 x double> @vuitofp_nxv1i32_nxv1f64(<vscale x 1 x i32> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
-define <vscale x 2 x half> @vsitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) {
+define <vscale x 2 x half> @vsitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -1211,7 +1211,7 @@ define <vscale x 2 x half> @vsitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
-define <vscale x 2 x half> @vuitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) {
+define <vscale x 2 x half> @vuitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -1223,7 +1223,7 @@ define <vscale x 2 x half> @vuitofp_nxv2i32_nxv2f16(<vscale x 2 x i32> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
-define <vscale x 2 x float> @vsitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) {
+define <vscale x 2 x float> @vsitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1234,7 +1234,7 @@ define <vscale x 2 x float> @vsitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
-define <vscale x 2 x float> @vuitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) {
+define <vscale x 2 x float> @vuitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1245,7 +1245,7 @@ define <vscale x 2 x float> @vuitofp_nxv2i32_nxv2f32(<vscale x 2 x i32> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
-define <vscale x 2 x double> @vsitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) {
+define <vscale x 2 x double> @vsitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1257,7 +1257,7 @@ define <vscale x 2 x double> @vsitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i32(<vscale x 2 x i32>, metadata, metadata)
-define <vscale x 2 x double> @vuitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) {
+define <vscale x 2 x double> @vuitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i32_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1269,7 +1269,7 @@ define <vscale x 2 x double> @vuitofp_nxv2i32_nxv2f64(<vscale x 2 x i32> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
-define <vscale x 4 x half> @vsitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) {
+define <vscale x 4 x half> @vsitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -1281,7 +1281,7 @@ define <vscale x 4 x half> @vsitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
-define <vscale x 4 x half> @vuitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) {
+define <vscale x 4 x half> @vuitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
@@ -1293,7 +1293,7 @@ define <vscale x 4 x half> @vuitofp_nxv4i32_nxv4f16(<vscale x 4 x i32> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
-define <vscale x 4 x float> @vsitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) {
+define <vscale x 4 x float> @vsitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1304,7 +1304,7 @@ define <vscale x 4 x float> @vsitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
-define <vscale x 4 x float> @vuitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) {
+define <vscale x 4 x float> @vuitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1315,7 +1315,7 @@ define <vscale x 4 x float> @vuitofp_nxv4i32_nxv4f32(<vscale x 4 x i32> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
-define <vscale x 4 x double> @vsitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) {
+define <vscale x 4 x double> @vsitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1327,7 +1327,7 @@ define <vscale x 4 x double> @vsitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i32(<vscale x 4 x i32>, metadata, metadata)
-define <vscale x 4 x double> @vuitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) {
+define <vscale x 4 x double> @vuitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i32_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1339,7 +1339,7 @@ define <vscale x 4 x double> @vuitofp_nxv4i32_nxv4f64(<vscale x 4 x i32> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
-define <vscale x 8 x half> @vsitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) {
+define <vscale x 8 x half> @vsitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1351,7 +1351,7 @@ define <vscale x 8 x half> @vsitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
-define <vscale x 8 x half> @vuitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) {
+define <vscale x 8 x half> @vuitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
@@ -1363,7 +1363,7 @@ define <vscale x 8 x half> @vuitofp_nxv8i32_nxv8f16(<vscale x 8 x i32> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
-define <vscale x 8 x float> @vsitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) {
+define <vscale x 8 x float> @vsitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1374,7 +1374,7 @@ define <vscale x 8 x float> @vsitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
-define <vscale x 8 x float> @vuitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) {
+define <vscale x 8 x float> @vuitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1385,7 +1385,7 @@ define <vscale x 8 x float> @vuitofp_nxv8i32_nxv8f32(<vscale x 8 x i32> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
-define <vscale x 8 x double> @vsitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) {
+define <vscale x 8 x double> @vsitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1397,7 +1397,7 @@ define <vscale x 8 x double> @vsitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i32(<vscale x 8 x i32>, metadata, metadata)
-define <vscale x 8 x double> @vuitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) {
+define <vscale x 8 x double> @vuitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i32_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1409,7 +1409,7 @@ define <vscale x 8 x double> @vuitofp_nxv8i32_nxv8f64(<vscale x 8 x i32> %va) {
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.sitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
-define <vscale x 16 x half> @vsitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) {
+define <vscale x 16 x half> @vsitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1421,7 +1421,7 @@ define <vscale x 16 x half> @vsitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va)
 }
 
 declare <vscale x 16 x half> @llvm.experimental.constrained.uitofp.nxv16f16.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
-define <vscale x 16 x half> @vuitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) {
+define <vscale x 16 x half> @vuitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
@@ -1433,7 +1433,7 @@ define <vscale x 16 x half> @vuitofp_nxv16i32_nxv16f16(<vscale x 16 x i32> %va)
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.sitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
-define <vscale x 16 x float> @vsitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) {
+define <vscale x 16 x float> @vsitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -1444,7 +1444,7 @@ define <vscale x 16 x float> @vsitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va)
 }
 
 declare <vscale x 16 x float> @llvm.experimental.constrained.uitofp.nxv16f32.nxv16i32(<vscale x 16 x i32>, metadata, metadata)
-define <vscale x 16 x float> @vuitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) {
+define <vscale x 16 x float> @vuitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv16i32_nxv16f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
@@ -1455,7 +1455,7 @@ define <vscale x 16 x float> @vuitofp_nxv16i32_nxv16f32(<vscale x 16 x i32> %va)
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.sitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
-define <vscale x 1 x half> @vsitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) {
+define <vscale x 1 x half> @vsitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1468,7 +1468,7 @@ define <vscale x 1 x half> @vsitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) {
 }
 
 declare <vscale x 1 x half> @llvm.experimental.constrained.uitofp.nxv1f16.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
-define <vscale x 1 x half> @vuitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) {
+define <vscale x 1 x half> @vuitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1481,7 +1481,7 @@ define <vscale x 1 x half> @vuitofp_nxv1i64_nxv1f16(<vscale x 1 x i64> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.sitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
-define <vscale x 1 x float> @vsitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) {
+define <vscale x 1 x float> @vsitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1493,7 +1493,7 @@ define <vscale x 1 x float> @vsitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) {
 }
 
 declare <vscale x 1 x float> @llvm.experimental.constrained.uitofp.nxv1f32.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
-define <vscale x 1 x float> @vuitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) {
+define <vscale x 1 x float> @vuitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
@@ -1505,7 +1505,7 @@ define <vscale x 1 x float> @vuitofp_nxv1i64_nxv1f32(<vscale x 1 x i64> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.sitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
-define <vscale x 1 x double> @vsitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) {
+define <vscale x 1 x double> @vsitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -1516,7 +1516,7 @@ define <vscale x 1 x double> @vsitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) {
 }
 
 declare <vscale x 1 x double> @llvm.experimental.constrained.uitofp.nxv1f64.nxv1i64(<vscale x 1 x i64>, metadata, metadata)
-define <vscale x 1 x double> @vuitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) {
+define <vscale x 1 x double> @vuitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv1i64_nxv1f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
@@ -1528,7 +1528,7 @@ define <vscale x 1 x double> @vuitofp_nxv1i64_nxv1f64(<vscale x 1 x i64> %va) {
 
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.sitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
-define <vscale x 2 x half> @vsitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) {
+define <vscale x 2 x half> @vsitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1541,7 +1541,7 @@ define <vscale x 2 x half> @vsitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) {
 }
 
 declare <vscale x 2 x half> @llvm.experimental.constrained.uitofp.nxv2f16.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
-define <vscale x 2 x half> @vuitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) {
+define <vscale x 2 x half> @vuitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1554,7 +1554,7 @@ define <vscale x 2 x half> @vuitofp_nxv2i64_nxv2f16(<vscale x 2 x i64> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.sitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
-define <vscale x 2 x float> @vsitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) {
+define <vscale x 2 x float> @vsitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1566,7 +1566,7 @@ define <vscale x 2 x float> @vsitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) {
 }
 
 declare <vscale x 2 x float> @llvm.experimental.constrained.uitofp.nxv2f32.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
-define <vscale x 2 x float> @vuitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) {
+define <vscale x 2 x float> @vuitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
@@ -1578,7 +1578,7 @@ define <vscale x 2 x float> @vuitofp_nxv2i64_nxv2f32(<vscale x 2 x i64> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.sitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
-define <vscale x 2 x double> @vsitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) {
+define <vscale x 2 x double> @vsitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -1589,7 +1589,7 @@ define <vscale x 2 x double> @vsitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) {
 }
 
 declare <vscale x 2 x double> @llvm.experimental.constrained.uitofp.nxv2f64.nxv2i64(<vscale x 2 x i64>, metadata, metadata)
-define <vscale x 2 x double> @vuitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) {
+define <vscale x 2 x double> @vuitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv2i64_nxv2f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
@@ -1600,7 +1600,7 @@ define <vscale x 2 x double> @vuitofp_nxv2i64_nxv2f64(<vscale x 2 x i64> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.sitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
-define <vscale x 4 x half> @vsitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) {
+define <vscale x 4 x half> @vsitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1613,7 +1613,7 @@ define <vscale x 4 x half> @vsitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) {
 }
 
 declare <vscale x 4 x half> @llvm.experimental.constrained.uitofp.nxv4f16.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
-define <vscale x 4 x half> @vuitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) {
+define <vscale x 4 x half> @vuitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1626,7 +1626,7 @@ define <vscale x 4 x half> @vuitofp_nxv4i64_nxv4f16(<vscale x 4 x i64> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.sitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
-define <vscale x 4 x float> @vsitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) {
+define <vscale x 4 x float> @vsitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1638,7 +1638,7 @@ define <vscale x 4 x float> @vsitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) {
 }
 
 declare <vscale x 4 x float> @llvm.experimental.constrained.uitofp.nxv4f32.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
-define <vscale x 4 x float> @vuitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) {
+define <vscale x 4 x float> @vuitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
@@ -1650,7 +1650,7 @@ define <vscale x 4 x float> @vuitofp_nxv4i64_nxv4f32(<vscale x 4 x i64> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.sitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
-define <vscale x 4 x double> @vsitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) {
+define <vscale x 4 x double> @vsitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -1661,7 +1661,7 @@ define <vscale x 4 x double> @vsitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) {
 }
 
 declare <vscale x 4 x double> @llvm.experimental.constrained.uitofp.nxv4f64.nxv4i64(<vscale x 4 x i64>, metadata, metadata)
-define <vscale x 4 x double> @vuitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) {
+define <vscale x 4 x double> @vuitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv4i64_nxv4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
@@ -1672,7 +1672,7 @@ define <vscale x 4 x double> @vuitofp_nxv4i64_nxv4f64(<vscale x 4 x i64> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.sitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
-define <vscale x 8 x half> @vsitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) {
+define <vscale x 8 x half> @vsitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1685,7 +1685,7 @@ define <vscale x 8 x half> @vsitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) {
 }
 
 declare <vscale x 8 x half> @llvm.experimental.constrained.uitofp.nxv8f16.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
-define <vscale x 8 x half> @vuitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) {
+define <vscale x 8 x half> @vuitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f16:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1698,7 +1698,7 @@ define <vscale x 8 x half> @vuitofp_nxv8i64_nxv8f16(<vscale x 8 x i64> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.sitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
-define <vscale x 8 x float> @vsitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) {
+define <vscale x 8 x float> @vsitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1710,7 +1710,7 @@ define <vscale x 8 x float> @vsitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) {
 }
 
 declare <vscale x 8 x float> @llvm.experimental.constrained.uitofp.nxv8f32.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
-define <vscale x 8 x float> @vuitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) {
+define <vscale x 8 x float> @vuitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
@@ -1722,7 +1722,7 @@ define <vscale x 8 x float> @vuitofp_nxv8i64_nxv8f32(<vscale x 8 x i64> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.sitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
-define <vscale x 8 x double> @vsitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) {
+define <vscale x 8 x double> @vsitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) strictfp {
 ; CHECK-LABEL: vsitofp_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
@@ -1733,7 +1733,7 @@ define <vscale x 8 x double> @vsitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) {
 }
 
 declare <vscale x 8 x double> @llvm.experimental.constrained.uitofp.nxv8f64.nxv8i64(<vscale x 8 x i64>, metadata, metadata)
-define <vscale x 8 x double> @vuitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) {
+define <vscale x 8 x double> @vuitofp_nxv8i64_nxv8f64(<vscale x 8 x i64> %va) strictfp {
 ; CHECK-LABEL: vuitofp_nxv8i64_nxv8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma