[clang] [lldb] [llvm] [bazel] Update after db7888ca9aef6c203b363bbb395549b4e6cfa9d4 (#146732) (PR #147726)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 9 06:39:31 PDT 2025
https://github.com/DeanSturtevant1 created https://github.com/llvm/llvm-project/pull/147726
None
From a17168c446642f6fca6c8bda7c41818c00a794f6 Mon Sep 17 00:00:00 2001
From: Dean Sturtevant <dsturtevant at google.com>
Date: Tue, 8 Jul 2025 16:54:35 -0400
Subject: [PATCH 1/8] [bazel] Update after 058056329982db13d513bc05d3c98f6558418242
---
utils/bazel/llvm-project-overlay/llvm/BUILD.bazel | 3 +++
1 file changed, 3 insertions(+)
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index ed1db42bf9e5e..a4fd0545e89c8 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -220,6 +220,8 @@ genrule(
"echo -e '#undef HANDLE_EXTENSION' >> $@\n",
)
+
+
cc_library(
name = "Support",
srcs = glob([
@@ -269,6 +271,7 @@ cc_library(
"include/llvm-c/ExternC.h",
"include/llvm-c/Support.h",
"include/llvm-c/Types.h",
+ "include/llvm-c/Visibility.h",
"include/llvm-c/blake3.h",
"include/llvm/ExecutionEngine/JITSymbol.h",
"include/llvm/Support/Extension.def",
From af457d58f6f37c06639cf74351cf3af85b9376e7 Mon Sep 17 00:00:00 2001
From: Dean Sturtevant <dsturtevant at google.com>
Date: Tue, 8 Jul 2025 16:59:38 -0400
Subject: [PATCH 2/8] Fix spacing
---
utils/bazel/llvm-project-overlay/llvm/BUILD.bazel | 2 --
1 file changed, 2 deletions(-)
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index a4fd0545e89c8..bea07cd44b143 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -220,8 +220,6 @@ genrule(
"echo -e '#undef HANDLE_EXTENSION' >> $@\n",
)
-
-
cc_library(
name = "Support",
srcs = glob([
From 6e0f0287defe588759befb2b0a55d9646824fec6 Mon Sep 17 00:00:00 2001
From: Shilei Tian <i at tianshilei.me>
Date: Tue, 8 Jul 2025 16:21:24 -0400
Subject: [PATCH 3/8] [AMDGPU] Add support for `v_cvt_f32_fp8` on gfx1250 (#147579)
Co-authored-by: Mekhanoshin, Stanislav <Stanislav.Mekhanoshin at amd.com>
---
clang/include/clang/Basic/BuiltinsAMDGPU.def | 1 +
clang/test/CodeGenOpenCL/amdgpu-features.cl | 2 +-
.../CodeGenOpenCL/builtins-amdgcn-gfx1250.cl | 38 +++
.../builtins-amdgcn-error-gfx1250-param.cl | 5 +
llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 6 +
llvm/lib/Target/AMDGPU/AMDGPU.td | 13 +
.../Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 1 +
.../AMDGPU/AsmParser/AMDGPUAsmParser.cpp | 8 +-
llvm/lib/Target/AMDGPU/GCNSubtarget.h | 3 +
llvm/lib/Target/AMDGPU/VOP1Instructions.td | 32 ++-
llvm/lib/Target/AMDGPU/VOPInstructions.td | 12 +-
llvm/lib/TargetParser/TargetParser.cpp | 1 +
.../AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll | 50 ++++
.../CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.ll | 267 ++++++++++++++++++
llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s | 9 +
llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s | 9 +
.../gfx1250_asm_vop3_from_vop1-fake16.s | 45 +++
.../MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s | 45 +++
.../gfx1250_asm_vop3_from_vop1_dpp16-fake16.s | 12 +
.../AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s | 12 +
.../gfx1250_asm_vop3_from_vop1_dpp8-fake16.s | 12 +
.../AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s | 12 +
.../Disassembler/AMDGPU/gfx1250_dasm_vop1.txt | 9 +
.../AMDGPU/gfx1250_dasm_vop1_dpp16.txt | 6 +
.../AMDGPU/gfx1250_dasm_vop3_from_vop1.txt | 45 +++
.../gfx1250_dasm_vop3_from_vop1_dpp16.txt | 9 +
.../gfx1250_dasm_vop3_from_vop1_dpp8.txt | 9 +
27 files changed, 655 insertions(+), 18 deletions(-)
create mode 100644 llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index fb358297a5eed..a5ee8013adff6 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -429,6 +429,7 @@ TARGET_BUILTIN(__builtin_amdgcn_smfmac_f32_32x32x32_fp8_fp8, "V16fV2iV4iV16fiIiI
TARGET_BUILTIN(__builtin_amdgcn_cvt_f32_bf8, "fiIi", "nc", "fp8-conversion-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_f32_fp8, "fiIi", "nc", "fp8-conversion-insts")
+TARGET_BUILTIN(__builtin_amdgcn_cvt_f32_fp8_e5m3, "fiIi", "nc", "fp8e5m3-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f32_bf8, "V2fiIb", "nc", "fp8-conversion-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_f32_fp8, "V2fiIb", "nc", "fp8-conversion-insts")
TARGET_BUILTIN(__builtin_amdgcn_cvt_pk_bf8_f32, "iffiIb", "nc", "fp8-conversion-insts")
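For reference, a minimal OpenCL C sketch of the new builtin (mirroring the Clang CodeGen test added later in this patch; the function name and float destination are illustrative). Per the "fiIi" prototype, it returns a float from an i32 source, and the second argument is the byte_sel immediate in [0..3] selecting which byte of the packed source is converted; it must be a compile-time constant:

// Usage sketch for the new builtin; byte_sel must be a constant in [0..3].
void example_cvt_f32_fp8_e5m3(global float *out, int packed)
{
    out[0] = __builtin_amdgcn_cvt_f32_fp8_e5m3(packed, 0); // convert byte 0
    out[1] = __builtin_amdgcn_cvt_f32_fp8_e5m3(packed, 3); // convert byte 3
}

This requires a target with the fp8e5m3-insts feature, i.e. gfx1250 in this patch.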
diff --git a/clang/test/CodeGenOpenCL/amdgpu-features.cl b/clang/test/CodeGenOpenCL/amdgpu-features.cl
index dc7a83002b7f1..77d2414230cf2 100644
--- a/clang/test/CodeGenOpenCL/amdgpu-features.cl
+++ b/clang/test/CodeGenOpenCL/amdgpu-features.cl
@@ -108,7 +108,7 @@
// GFX1153: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1200: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
// GFX1201: "target-features"="+16-bit-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+ci-insts,+dl-insts,+dot10-insts,+dot11-insts,+dot12-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize32"
-// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+transpose-load-f4f6-insts,+wavefrontsize32"
+// GFX1250: "target-features"="+16-bit-insts,+ashr-pk-insts,+atomic-buffer-global-pk-add-f16-insts,+atomic-buffer-pk-add-bf16-inst,+atomic-ds-pk-add-16-insts,+atomic-fadd-rtn-insts,+atomic-flat-pk-add-16-insts,+atomic-global-pk-add-bf16-inst,+bitop3-insts,+ci-insts,+dl-insts,+dot7-insts,+dot8-insts,+dpp,+fp8-conversion-insts,+fp8e5m3-insts,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx12-insts,+gfx1250-insts,+gfx8-insts,+gfx9-insts,+permlane16-swap,+prng-inst,+setprio-inc-wg-inst,+transpose-load-f4f6-insts,+wavefrontsize32"
// GFX1103-W64: "target-features"="+16-bit-insts,+atomic-fadd-rtn-insts,+ci-insts,+dl-insts,+dot10-insts,+dot12-insts,+dot5-insts,+dot7-insts,+dot8-insts,+dot9-insts,+dpp,+gfx10-3-insts,+gfx10-insts,+gfx11-insts,+gfx8-insts,+gfx9-insts,+wavefrontsize64"
diff --git a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
index 4d4afedae3658..421099d3876e3 100644
--- a/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
+++ b/clang/test/CodeGenOpenCL/builtins-amdgcn-gfx1250.cl
@@ -139,3 +139,41 @@ void test_cvt_pk_f16_bf8(global half2* out, short a)
{
out[0] = __builtin_amdgcn_cvt_pk_f16_bf8(a);
}
+
+// CHECK-LABEL: @test_cvt_f32_fp8_e5m3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca ptr addrspace(1), align 8, addrspace(5)
+// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
+// CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
+// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
+// CHECK-NEXT: store ptr addrspace(1) [[OUT:%.*]], ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 [[TMP0]], i32 0)
+// CHECK-NEXT: [[CONV:%.*]] = fptosi float [[TMP1]] to i32
+// CHECK-NEXT: [[TMP2:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[CONV]], ptr addrspace(1) [[TMP2]], align 4
+// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 [[TMP3]], i32 1)
+// CHECK-NEXT: [[CONV1:%.*]] = fptosi float [[TMP4]] to i32
+// CHECK-NEXT: [[TMP5:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[CONV1]], ptr addrspace(1) [[TMP5]], align 4
+// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP7:%.*]] = call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 [[TMP6]], i32 2)
+// CHECK-NEXT: [[CONV2:%.*]] = fptosi float [[TMP7]] to i32
+// CHECK-NEXT: [[TMP8:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[CONV2]], ptr addrspace(1) [[TMP8]], align 4
+// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[A_ADDR_ASCAST]], align 4
+// CHECK-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 [[TMP9]], i32 3)
+// CHECK-NEXT: [[CONV3:%.*]] = fptosi float [[TMP10]] to i32
+// CHECK-NEXT: [[TMP11:%.*]] = load ptr addrspace(1), ptr [[OUT_ADDR_ASCAST]], align 8
+// CHECK-NEXT: store i32 [[CONV3]], ptr addrspace(1) [[TMP11]], align 4
+// CHECK-NEXT: ret void
+//
+void test_cvt_f32_fp8_e5m3(global int* out, int a)
+{
+ *out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 0);
+ *out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 1);
+ *out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 2);
+ *out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 3);
+}
diff --git a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
index 3ba0d50e79031..7494c4f984353 100644
--- a/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
+++ b/clang/test/SemaOpenCL/builtins-amdgcn-error-gfx1250-param.cl
@@ -27,3 +27,8 @@ void test_amdgcn_tensor_load_store(v4i sg0, v8i sg1, v4i sg2, v4i sg3, int cpol)
__builtin_amdgcn_tensor_store_from_lds(sg0, sg1, sg2, sg3, cpol); // expected-error {{'__builtin_amdgcn_tensor_store_from_lds' must be a constant integer}}
__builtin_amdgcn_tensor_store_from_lds_d2(sg0, sg1, cpol); // expected-error {{'__builtin_amdgcn_tensor_store_from_lds_d2' must be a constant integer}}
}
+
+void test_cvt_f32_fp8_e5m3(global int* out, int a)
+{
+ *out = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, a); // expected-error {{'__builtin_amdgcn_cvt_f32_fp8_e5m3' must be a constant integer}}
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
index f7a9b65854696..16885f331e9dd 100644
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -3402,6 +3402,12 @@ def int_amdgcn_cvt_f32_fp8 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_fp8">,
[llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
+// llvm.amdgcn.cvt.f32.fp8.e5m3 float vdst, int srcA, imm byte_sel [0..3]
+def int_amdgcn_cvt_f32_fp8_e5m3 : ClangBuiltin<"__builtin_amdgcn_cvt_f32_fp8_e5m3">,
+ DefaultAttrsIntrinsic<[llvm_float_ty],
+ [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, ImmArg<ArgIndex<1>>]>;
+
// llvm.amdgcn.cvt.pk.f32.bf8 float2 vdst, int srcA, imm word_sel
// word_sel = 1 selects 2 high bytes, 0 selects 2 low bytes.
def int_amdgcn_cvt_pk_f32_bf8 : ClangBuiltin<"__builtin_amdgcn_cvt_pk_f32_bf8">,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 1a1c32fba9d18..a66b9c7285796 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -819,6 +819,12 @@ def FeatureFP8ConversionInsts : SubtargetFeature<"fp8-conversion-insts",
"Has fp8 and bf8 conversion instructions"
>;
+def FeatureFP8E5M3Insts : SubtargetFeature<"fp8e5m3-insts",
+ "HasFP8E5M3Insts",
+ "true",
+ "Has fp8 e5m3 format support"
+>;
+
def FeatureCvtFP8VOP1Bug : SubtargetFeature<"cvt-fp8-vop1-bug",
"HasCvtFP8Vop1Bug",
"true",
@@ -1937,6 +1943,7 @@ def FeatureISAVersion12_50 : FeatureSet<
FeatureAtomicBufferPkAddBF16Inst,
FeatureFlatAtomicFaddF32Inst,
FeatureFP8ConversionInsts,
+ FeatureFP8E5M3Insts,
FeaturePackedTID,
FeatureVcmpxPermlaneHazard,
FeatureSALUFloatInsts,
@@ -2573,6 +2580,12 @@ def HasFP8Insts : Predicate<"Subtarget->hasFP8Insts()">,
def HasFP8ConversionInsts : Predicate<"Subtarget->hasFP8ConversionInsts()">,
AssemblerPredicate<(all_of FeatureFP8ConversionInsts)>;
+def HasFP8E5M3Insts : Predicate<"Subtarget->hasFP8E5M3Insts()">,
+ AssemblerPredicate<(all_of FeatureFP8E5M3Insts)>;
+
+def NotHasFP8E5M3Insts : Predicate<"!Subtarget->hasFP8E5M3Insts()">,
+ AssemblerPredicate<(all_of (not FeatureFP8E5M3Insts))>;
+
def HasPkFmacF16Inst : Predicate<"Subtarget->hasPkFmacF16Inst()">,
AssemblerPredicate<(all_of FeaturePkFmacF16Inst)>;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 353fb23fa1520..1483d97d23fcc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4600,6 +4600,7 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case Intrinsic::amdgcn_dot4_f32_fp8_fp8:
case Intrinsic::amdgcn_dot4_f32_bf8_bf8:
case Intrinsic::amdgcn_cvt_f32_fp8:
+ case Intrinsic::amdgcn_cvt_f32_fp8_e5m3:
case Intrinsic::amdgcn_cvt_f32_bf8:
case Intrinsic::amdgcn_cvt_off_f32_i4:
case Intrinsic::amdgcn_cvt_pk_f32_fp8:
diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
index 628f1a69865ed..3af140461afdb 100644
--- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -9566,13 +9566,13 @@ void AMDGPUAsmParser::cvtVOP3DPP(MCInst &Inst, const OperandVector &Operands,
}
}
- if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel))
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp) && !IsVOP3CvtSrDpp)
addOptionalImmOperand(Inst, Operands, OptionalIdx,
- AMDGPUOperand::ImmTyByteSel);
+ AMDGPUOperand::ImmTyClamp);
- if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::clamp))
+ if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::byte_sel))
addOptionalImmOperand(Inst, Operands, OptionalIdx,
- AMDGPUOperand::ImmTyClamp);
+ AMDGPUOperand::ImmTyByteSel);
if (AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::omod))
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 45721422edcf1..f38076ea7c468 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -165,6 +165,7 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
bool HasMAIInsts = false;
bool HasFP8Insts = false;
bool HasFP8ConversionInsts = false;
+ bool HasFP8E5M3Insts = false;
bool HasCvtFP8Vop1Bug = false;
bool HasPkFmacF16Inst = false;
bool HasAtomicFMinFMaxF32GlobalInsts = false;
@@ -861,6 +862,8 @@ class GCNSubtarget final : public AMDGPUGenSubtargetInfo,
bool hasFP8ConversionInsts() const { return HasFP8ConversionInsts; }
+ bool hasFP8E5M3Insts() const { return HasFP8E5M3Insts; }
+
bool hasPkFmacF16Inst() const {
return HasPkFmacF16Inst;
}
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index 0dacd9df71305..5e88684c102ce 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -676,13 +676,14 @@ let HasClamp = 0, HasOMod = 0, HasExtDPP = 0, HasExtVOP3DPP = 0,
}
}
-class VOPProfile_Base_CVT_F_F8_ByteSel<ValueType DstVT> : VOPProfile<[DstVT, i32, untyped, untyped]> {
+class VOPProfile_Base_CVT_F_F8_ByteSel<ValueType DstVT, bit _HasClamp = 0> :
+ VOPProfile<[DstVT, i32, untyped, untyped]> {
+ let HasClamp = _HasClamp;
let HasFP8SrcByteSel = 1;
let HasOpSel = 0;
let HasExtDPP = 1;
let HasExtVOP3DPP = 1;
let HasExtSDWA = 0;
- let HasClamp = 0;
let HasOMod = 0;
let HasModifiers = 0;
}
@@ -695,7 +696,12 @@ def V_CVT_F16_F8_Fake16_Profile : VOP3_Profile_Fake16<V_CVT_F16_F8_Profile>;
let SubtargetPredicate = isGFX12Plus, OtherPredicates = [HasFP8ConversionInsts],
mayRaiseFPException = 0, SchedRW = [WriteFloatCvt] in {
- defm V_CVT_F32_FP8_OP_SEL : VOP1Inst<"v_cvt_f32_fp8_op_sel", VOPProfile_Base_CVT_F_F8_ByteSel<f32>>;
+ // FIXME: This differs from downstream due to changes that haven't been upstreamed yet.
+ let SubtargetPredicate = isGFX12PlusNot12_50 in
+ defm V_CVT_F32_FP8_OP_SEL : VOP1Inst<"v_cvt_f32_fp8_op_sel", VOPProfile_Base_CVT_F_F8_ByteSel<f32>>;
+ let SubtargetPredicate = isGFX125xOnly in
+ defm V_CVT_F32_FP8_gfx1250 : VOP1Inst<"v_cvt_f32_fp8_gfx1250", VOPProfile_Base_CVT_F_F8_ByteSel<f32, 1>>;
+
defm V_CVT_F32_BF8_OP_SEL : VOP1Inst<"v_cvt_f32_bf8_op_sel", VOPProfile_Base_CVT_F_F8_ByteSel<f32>>;
let True16Predicate = UseFakeTrue16Insts in {
@@ -714,9 +720,19 @@ class Cvt_F_F8_Pat_ByteSel<SDPatternOperator node, VOP3_Pseudo inst, bit HasOpSe
(inst $src0, (as_i32timm $byte_sel)))
>;
-let SubtargetPredicate = isGFX12Plus, OtherPredicates = [HasFP8ConversionInsts] in {
- def : Cvt_F_F8_Pat_ByteSel<int_amdgcn_cvt_f32_fp8, V_CVT_F32_FP8_OP_SEL_e64>;
- def : Cvt_F_F8_Pat_ByteSel<int_amdgcn_cvt_f32_bf8, V_CVT_F32_BF8_OP_SEL_e64>;
+let OtherPredicates = [HasFP8ConversionInsts] in {
+ // FIXME: This differs from downstream due to changes that haven't been upstreamed yet.
+ let SubtargetPredicate = isGFX12PlusNot12_50 in
+ def : Cvt_F_F8_Pat_ByteSel<int_amdgcn_cvt_f32_fp8, V_CVT_F32_FP8_OP_SEL_e64>;
+ let SubtargetPredicate = isGFX125xOnly in {
+ def : GCNPat<(int_amdgcn_cvt_f32_fp8 i32:$src0, timm:$byte_sel),
+ (V_CVT_F32_FP8_gfx1250_e64 $src0, DSTCLAMP.NONE, (as_i32timm $byte_sel))>;
+ def : GCNPat<(int_amdgcn_cvt_f32_fp8_e5m3 i32:$src0, timm:$byte_sel),
+ (V_CVT_F32_FP8_gfx1250_e64 $src0, DSTCLAMP.ENABLE, (as_i32timm $byte_sel))>;
+ }
+ // FIXME: This differs from downstream due to changes that haven't been upstreamed yet.
+ let SubtargetPredicate = isGFX12Plus in
+ def : Cvt_F_F8_Pat_ByteSel<int_amdgcn_cvt_f32_bf8, V_CVT_F32_BF8_OP_SEL_e64>;
}
class Cvt_PK_F32_F8_Pat_OpSel<SDPatternOperator node, int index,
@@ -1038,7 +1054,9 @@ multiclass VOP1_Real_FULL_t16_and_fake16_gfx1250<
VOP1_Real_FULL_with_name<GFX1250Gen, op, opName#"_fake16", asmName>;
}
-defm V_CVT_F32_FP8 : VOP1_Real_FULL_with_name<GFX12Gen, 0x06c, "V_CVT_F32_FP8_OP_SEL", "v_cvt_f32_fp8">;
+defm V_CVT_F32_FP8 : VOP1_Real_FULL_with_name<GFX12Not12_50Gen, 0x06c, "V_CVT_F32_FP8_OP_SEL", "v_cvt_f32_fp8">;
+defm V_CVT_F32_FP8 : VOP1_Real_FULL_with_name<GFX1250Gen, 0x06c, "V_CVT_F32_FP8_gfx1250", "v_cvt_f32_fp8">;
+
defm V_CVT_F32_BF8 : VOP1_Real_FULL_with_name<GFX12Gen, 0x06d, "V_CVT_F32_BF8_OP_SEL", "v_cvt_f32_bf8">;
defm V_CVT_PK_F32_FP8_fake16 : VOP1_Real_e32_with_name<GFX12Gen, 0x06e, "V_CVT_PK_F32_FP8_fake16", "v_cvt_pk_f32_fp8">;
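One point worth calling out in the patterns above: on gfx1250 the e5m3 conversion is not a separate opcode. V_CVT_F32_FP8_gfx1250 gains a clamp bit, and the GCNPat for int_amdgcn_cvt_f32_fp8_e5m3 selects DSTCLAMP.ENABLE while the plain fp8 intrinsic selects DSTCLAMP.NONE. A small OpenCL C sketch of the two flavors side by side, with the expected ISA per the llc tests later in this patch:

// Both builtins lower to v_cvt_f32_fp8 on gfx1250; the e5m3 form is
// distinguished only by the clamp modifier:
//   __builtin_amdgcn_cvt_f32_fp8(a, 0)      -> v_cvt_f32_fp8_e32 v0, v0
//   __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 0) -> v_cvt_f32_fp8_e64 v0, v0 clamp
void example_fp8_flavors(global float *out, int a)
{
    out[0] = __builtin_amdgcn_cvt_f32_fp8(a, 0);      // default fp8 format
    out[1] = __builtin_amdgcn_cvt_f32_fp8_e5m3(a, 0); // e5m3 format, clamp bit set
}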
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index f8edfec1100a2..3e01f8cd044e2 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -352,13 +352,13 @@ class VOP3FP8OpSel_src_bytesel_gfx11_gfx12<bits<10> op, VOPProfile p> : VOP3e_gf
let Inst{14} = !if(!and(p.HasOpSel, p.HasDst), src0_modifiers{3}, 0);
}
- class VOP3FP8OpSel_dst_bytesel_gfx11_gfx12<bits<10> op, VOPProfile p> : VOP3e_gfx10<op, p> {
- bits<2> byte_sel;
+class VOP3FP8OpSel_dst_bytesel_gfx11_gfx12<bits<10> op, VOPProfile p> : VOP3e_gfx10<op, p> {
+ bits<2> byte_sel;
- let Inst{11} = 0; // op_sel0
- let Inst{12} = 0; // op_sel1
- let Inst{14-13} = byte_sel; // op_sel2/3
- }
+ let Inst{11} = !if(!and(p.HasOpSel, p.HasSrc0Mods), src0_modifiers{2}, 0); // op_sel0
+ let Inst{12} = !if(!and(p.HasOpSel, p.HasSrc1Mods), src1_modifiers{2}, 0); // op_sel1
+ let Inst{14-13} = byte_sel; // op_sel2/3
+}
class VOP3DotOpSel_gfx11_gfx12<bits<10> op, VOPProfile p> :
VOP3e_t16_gfx11_gfx12<op, p>{
diff --git a/llvm/lib/TargetParser/TargetParser.cpp b/llvm/lib/TargetParser/TargetParser.cpp
index cae12f9a4ed3e..31123c5eb7ab7 100644
--- a/llvm/lib/TargetParser/TargetParser.cpp
+++ b/llvm/lib/TargetParser/TargetParser.cpp
@@ -445,6 +445,7 @@ void AMDGPU::fillAMDGPUFeatureMap(StringRef GPU, const Triple &T,
Features["prng-inst"] = true;
Features["transpose-load-f4f6-insts"] = true;
Features["fp8-conversion-insts"] = true;
+ Features["fp8e5m3-insts"] = true;
Features["permlane16-swap"] = true;
Features["ashr-pk-insts"] = true;
Features["atomic-buffer-pk-add-bf16-inst"] = true;
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
new file mode 100644
index 0000000000000..43c8d8318df1d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.e5m3.ll
@@ -0,0 +1,50 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
+
+declare float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32, i32)
+
+define float @test_cvt_f32_fp8_e5m3_byte0(i32 %a) {
+; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 clamp
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 %a, i32 0)
+ ret float %ret
+}
+
+define float @test_cvt_f32_fp8_e5m3_byte1(i32 %a) {
+; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:1 clamp
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 %a, i32 1)
+ ret float %ret
+}
+
+define float @test_cvt_f32_fp8_e5m3_byte2(i32 %a) {
+; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:2 clamp
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 %a, i32 2)
+ ret float %ret
+}
+
+define float @test_cvt_f32_fp8_e5m3_byte3(i32 %a) {
+; GFX1250-LABEL: test_cvt_f32_fp8_e5m3_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:3 clamp
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call float @llvm.amdgcn.cvt.f32.fp8.e5m3(i32 %a, i32 3)
+ ret float %ret
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.ll
index 16d32b73b9b0d..09b1ea7c55afe 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cvt.fp8.ll
@@ -6,6 +6,9 @@
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-TRUE16 %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX12,GFX12-FAKE16 %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX12 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-TRUE16 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 < %s | FileCheck -check-prefixes=GFX1250 %s
declare float @llvm.amdgcn.cvt.f32.bf8(i32, i32)
declare float @llvm.amdgcn.cvt.f32.fp8(i32, i32)
@@ -38,6 +41,13 @@ define float @test_cvt_f32_bf8_byte0(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_bf8_e32 v0, v0
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_bf8_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_bf8_e32 v0, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.bf8(i32 %a, i32 0)
ret float %ret
}
@@ -58,6 +68,13 @@ define float @test_cvt_f32_bf8_byte1(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:1
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_bf8_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.bf8(i32 %a, i32 1)
ret float %ret
}
@@ -78,6 +95,13 @@ define float @test_cvt_f32_bf8_byte2(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_bf8_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.bf8(i32 %a, i32 2)
ret float %ret
}
@@ -98,6 +122,13 @@ define float @test_cvt_f32_bf8_byte3(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:3
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_bf8_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.bf8(i32 %a, i32 3)
ret float %ret
}
@@ -124,6 +155,13 @@ define float @test_cvt_f32_fp8_byte0(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_fp8_e32 v0, v0
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_fp8_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e32 v0, v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.fp8(i32 %a, i32 0)
ret float %ret
}
@@ -144,6 +182,13 @@ define float @test_cvt_f32_fp8_byte1(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:1
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_fp8_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.fp8(i32 %a, i32 1)
ret float %ret
}
@@ -164,6 +209,13 @@ define float @test_cvt_f32_fp8_byte2(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_fp8_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.fp8(i32 %a, i32 2)
ret float %ret
}
@@ -184,6 +236,13 @@ define float @test_cvt_f32_fp8_byte3(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:3
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_f32_fp8_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:3
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call float @llvm.amdgcn.cvt.f32.fp8(i32 %a, i32 3)
ret float %ret
}
@@ -204,6 +263,13 @@ define <2 x float> @test_cvt_pk_f32_bf8_word0(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_pk_f32_bf8_e32 v[0:1], v0
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_pk_f32_bf8_word0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_pk_f32_bf8_e32 v[0:1], v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.pk.f32.bf8(i32 %a, i1 false)
ret <2 x float> %ret
}
@@ -224,6 +290,13 @@ define <2 x float> @test_cvt_pk_f32_bf8_word1(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_pk_f32_bf8_e64 v[0:1], v0 op_sel:[1,0]
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_pk_f32_bf8_word1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_pk_f32_bf8_e64 v[0:1], v0 op_sel:[1,0]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.pk.f32.bf8(i32 %a, i1 true)
ret <2 x float> %ret
}
@@ -244,6 +317,13 @@ define <2 x float> @test_cvt_pk_f32_fp8_word0(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_pk_f32_fp8_e32 v[0:1], v0
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_pk_f32_fp8_word0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_pk_f32_fp8_e32 v[0:1], v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.pk.f32.fp8(i32 %a, i1 false)
ret <2 x float> %ret
}
@@ -264,6 +344,13 @@ define <2 x float> @test_cvt_pk_f32_fp8_word1(i32 %a) {
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: v_cvt_pk_f32_fp8_e64 v[0:1], v0 op_sel:[1,0]
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_pk_f32_fp8_word1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_pk_f32_fp8_e64 v[0:1], v0 op_sel:[1,0]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call <2 x float> @llvm.amdgcn.cvt.pk.f32.fp8(i32 %a, i1 true)
ret <2 x float> %ret
}
@@ -299,6 +386,24 @@ define i32 @test_cvt_pk_bf8_f32_word0(float %x, float %y, i32 %old) {
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: test_cvt_pk_bf8_f32_word0:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_bf8_f32 v2.l, v0, v1
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_bf8_f32_word0:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_bf8_f32 v2, v0, v1
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.pk.bf8.f32(float %x, float %y, i32 %old, i1 false)
ret i32 %ret
}
@@ -335,6 +440,24 @@ define i32 @test_cvt_pk_bf8_f32_word1(float %x, float %y, i32 %old) {
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: test_cvt_pk_bf8_f32_word1:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_bf8_f32 v2.h, v0, v1 op_sel:[0,0,1]
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_bf8_f32_word1:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_bf8_f32 v2, v0, v1 op_sel:[0,0,1]
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.pk.bf8.f32(float %x, float %y, i32 %old, i1 true)
ret i32 %ret
}
@@ -370,6 +493,24 @@ define i32 @test_cvt_pk_fp8_f32_word0(float %x, float %y, i32 %old) {
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.l, v0, v1
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word0:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32(float %x, float %y, i32 %old, i1 false)
ret i32 %ret
}
@@ -406,6 +547,24 @@ define i32 @test_cvt_pk_fp8_f32_word1(float %x, float %y, i32 %old) {
; GFX12-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-FAKE16-NEXT: v_mov_b32_e32 v0, v2
; GFX12-FAKE16-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-TRUE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-TRUE16: ; %bb.0:
+; GFX1250-TRUE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-TRUE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-TRUE16-NEXT: v_cvt_pk_fp8_f32 v2.h, v0, v1 op_sel:[0,0,1]
+; GFX1250-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-TRUE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-TRUE16-NEXT: s_set_pc_i64 s[30:31]
+;
+; GFX1250-FAKE16-LABEL: test_cvt_pk_fp8_f32_word1:
+; GFX1250-FAKE16: ; %bb.0:
+; GFX1250-FAKE16-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-FAKE16-NEXT: s_wait_kmcnt 0x0
+; GFX1250-FAKE16-NEXT: v_cvt_pk_fp8_f32 v2, v0, v1 op_sel:[0,0,1]
+; GFX1250-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-FAKE16-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-FAKE16-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.pk.fp8.f32(float %x, float %y, i32 %old, i1 true)
ret i32 %ret
}
@@ -429,6 +588,15 @@ define i32 @test_cvt_sr_bf8_f32_byte0(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_bf8_f32_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_bf8_f32 v2, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f32(float %x, i32 %r, i32 %old, i32 0)
ret i32 %ret
}
@@ -453,6 +621,15 @@ define i32 @test_cvt_sr_bf8_f32_byte1(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_bf8_f32_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_bf8_f32 v2, v0, v1 byte_sel:1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f32(float %x, i32 %r, i32 %old, i32 1)
ret i32 %ret
}
@@ -477,6 +654,15 @@ define i32 @test_cvt_sr_bf8_f32_byte2(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_bf8_f32_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_bf8_f32 v2, v0, v1 byte_sel:2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f32(float %x, i32 %r, i32 %old, i32 2)
ret i32 %ret
}
@@ -501,6 +687,15 @@ define i32 @test_cvt_sr_bf8_f32_byte3(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_bf8_f32_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_bf8_f32 v2, v0, v1 byte_sel:3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.bf8.f32(float %x, i32 %r, i32 %old, i32 3)
ret i32 %ret
}
@@ -524,6 +719,15 @@ define i32 @test_cvt_sr_fp8_f32_byte0(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32(float %x, i32 %r, i32 %old, i32 0)
ret i32 %ret
}
@@ -548,6 +752,15 @@ define i32 @test_cvt_sr_fp8_f32_byte1(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:1
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32(float %x, i32 %r, i32 %old, i32 1)
ret i32 %ret
}
@@ -572,6 +785,15 @@ define i32 @test_cvt_sr_fp8_f32_byte2(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte2:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:2
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32(float %x, i32 %r, i32 %old, i32 2)
ret i32 %ret
}
@@ -596,6 +818,15 @@ define i32 @test_cvt_sr_fp8_f32_byte3(float %x, i32 %r, i32 %old) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_mov_b32_e32 v0, v2
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_cvt_sr_fp8_f32_byte3:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_cvt_sr_fp8_f32 v2, v0, v1 byte_sel:3
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_mov_b32_e32 v0, v2
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%ret = tail call i32 @llvm.amdgcn.cvt.sr.fp8.f32(float %x, i32 %r, i32 %old, i32 3)
ret i32 %ret
}
@@ -619,6 +850,15 @@ define float @test_sext_cvt_f32_fp8(i16 %a) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:1
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_sext_cvt_f32_fp8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f32_fp8_e64 v0, v0 byte_sel:1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%a.sext = sext i16 %a to i32
%ret = tail call float @llvm.amdgcn.cvt.f32.fp8(i32 %a.sext, i32 1)
ret float %ret
@@ -643,6 +883,15 @@ define float @test_sext_cvt_f32_bf8(i16 %a) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:1
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_sext_cvt_f32_bf8:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_f32_bf8_e64 v0, v0 byte_sel:1
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%a.sext = sext i16 %a to i32
%ret = tail call float @llvm.amdgcn.cvt.f32.bf8(i32 %a.sext, i32 1)
ret float %ret
@@ -667,6 +916,15 @@ define <2 x float> @test_sext_cvt_pk_f32_bf8_word1(i16 %a) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_cvt_pk_f32_bf8_e64 v[0:1], v0 op_sel:[1,0]
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_sext_cvt_pk_f32_bf8_word1:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_pk_f32_bf8_e64 v[0:1], v0 op_sel:[1,0]
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%a.sext = sext i16 %a to i32
%ret = tail call <2 x float> @llvm.amdgcn.cvt.pk.f32.bf8(i32 %a.sext, i1 true)
ret <2 x float> %ret
@@ -691,6 +949,15 @@ define <2 x float> @test_sext_cvt_pk_f32_fp8_word0(i16 %a) {
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_cvt_pk_f32_fp8_e32 v[0:1], v0
; GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1250-LABEL: test_sext_cvt_pk_f32_fp8_word0:
+; GFX1250: ; %bb.0:
+; GFX1250-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX1250-NEXT: s_wait_kmcnt 0x0
+; GFX1250-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GFX1250-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT: v_cvt_pk_f32_fp8_e32 v[0:1], v0
+; GFX1250-NEXT: s_set_pc_i64 s[30:31]
%a.sext = sext i16 %a to i32
%ret = tail call <2 x float> @llvm.amdgcn.cvt.pk.f32.fp8(i32 %a.sext, i1 false)
ret <2 x float> %ret
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s
index 7b07c84d56680..9d437c7e7909f 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1-fake16.s
@@ -87,3 +87,12 @@ v_cvt_pk_f16_fp8 v1, s2
v_cvt_pk_f16_fp8 v1, 100
// GFX1250: v_cvt_pk_f16_fp8 v1, 0x64 ; encoding: [0xff,0xea,0x02,0x7e,0x64,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e32 v1, s3
+// GFX1250: v_cvt_f32_fp8_e32 v1, s3 ; encoding: [0x03,0xd8,0x02,0x7e]
+
+v_cvt_f32_fp8_e32 v1, 3
+// GFX1250: v_cvt_f32_fp8_e32 v1, 3 ; encoding: [0x83,0xd8,0x02,0x7e]
+
+v_cvt_f32_fp8_e32 v1, v3
+// GFX1250: v_cvt_f32_fp8_e32 v1, v3 ; encoding: [0x03,0xd9,0x02,0x7e]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s
index 30c62c957874d..ff16e80c2d93c 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop1.s
@@ -96,3 +96,12 @@ v_cvt_pk_f16_fp8 v1, s2
v_cvt_pk_f16_fp8 v1, 100
// GFX1250: v_cvt_pk_f16_fp8 v1, 0x64 ; encoding: [0xff,0xea,0x02,0x7e,0x64,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e32 v1, s3
+// GFX1250: v_cvt_f32_fp8_e32 v1, s3 ; encoding: [0x03,0xd8,0x02,0x7e]
+
+v_cvt_f32_fp8_e32 v1, 3
+// GFX1250: v_cvt_f32_fp8_e32 v1, 3 ; encoding: [0x83,0xd8,0x02,0x7e]
+
+v_cvt_f32_fp8_e32 v1, v3
+// GFX1250: v_cvt_f32_fp8_e32 v1, v3 ; encoding: [0x03,0xd9,0x02,0x7e]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
index b333541a0f573..5874bb76b36d1 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1-fake16.s
@@ -1,6 +1,51 @@
// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -show-encoding %s | FileCheck --check-prefix=GFX1250 %s
+v_cvt_f32_fp8_e64 v1, s3
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 ; encoding: [0x01,0x00,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, s3 byte_sel:1
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, s3 byte_sel:2
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, s3 byte_sel:3
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 ; encoding: [0x01,0x00,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3 byte_sel:1
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3 byte_sel:2
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3 byte_sel:3
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 ; encoding: [0x01,0x00,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 byte_sel:1
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 byte_sel:2
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 byte_sel:3
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 clamp ; encoding: [0x01,0x80,0xec,0xd5,0x03,0x01,0x00,0x00]
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 clamp ; encoding: [0x01,0x80,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8 v1, v3 byte_sel:1 clamp
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:1 clamp ; encoding: [0x01,0x90,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8 v1, v3 byte_sel:2 clamp
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:2 clamp ; encoding: [0x01,0x88,0xec,0xd5,0x03,0x01,0x00,0x00]
+
v_cvt_f32_bf16_e64 v5, v1
// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
index df595fe562e0e..16bfc10fb16a7 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1.s
@@ -1,6 +1,51 @@
// NOTE: Assertions have been autogenerated by utils/update_mc_test_checks.py UTC_ARGS: --version 5
// RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -show-encoding %s | FileCheck --check-prefix=GFX1250 %s
+v_cvt_f32_fp8_e64 v1, s3
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 ; encoding: [0x01,0x00,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, s3 byte_sel:1
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, s3 byte_sel:2
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, s3 byte_sel:3
+// GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 ; encoding: [0x01,0x00,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3 byte_sel:1
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3 byte_sel:2
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, 3 byte_sel:3
+// GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 ; encoding: [0x01,0x00,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 byte_sel:1
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 byte_sel:2
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 byte_sel:3
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8_e64 v1, v3 clamp ; encoding: [0x01,0x80,0xec,0xd5,0x03,0x01,0x00,0x00]
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 clamp ; encoding: [0x01,0x80,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8 v1, v3 byte_sel:1 clamp
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:1 clamp ; encoding: [0x01,0x90,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+v_cvt_f32_fp8 v1, v3 byte_sel:2 clamp
+// GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:2 clamp ; encoding: [0x01,0x88,0xec,0xd5,0x03,0x01,0x00,0x00]
+
v_cvt_f32_bf16_e64 v5, v1
// GFX1250: v_cvt_f32_bf16_e64 v5, v1 ; encoding: [0x05,0x00,0xf2,0xd5,0x01,0x01,0x00,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
index b4000ce9425fe..ea22a8cbdda03 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16-fake16.s
@@ -86,6 +86,18 @@ v_cvt_f16_fp8 v1, v2 op_sel:[0,1] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf ba
// GFX1250: v_cvt_f16_fp8_e64_dpp v1, v2 op_sel:[0,1] byte_sel:3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x58,0xf7,0xd5,0xfa,0x00,0x00,0x00,0x02,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_cvt_f32_fp8 v1, v3 clamp quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x80,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:22: error: invalid operand for instruction
+
+v_cvt_f32_fp8 v1, v3 byte_sel:1 clamp quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:1 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x90,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
+v_cvt_f32_fp8 v1, v3 byte_sel:2 clamp quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:2 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x88,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
v_cvt_pk_f16_bf8 v1, v128 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1
// GFX1250: v_cvt_pk_f16_bf8_e64_dpp v1, v128 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x04,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
index 82d8245c86249..868bbe7cdde3a 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp16.s
@@ -98,6 +98,18 @@ v_cvt_f16_fp8 v128.l, v2 quad_perm:[0,1,2,3]
// GFX1250: v_cvt_f16_fp8_e64_dpp v128.l, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x80,0x00,0xf7,0xd5,0xfa,0x00,0x00,0x00,0x02,0xe4,0x00,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_cvt_f32_fp8 v1, v3 clamp quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x80,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:22: error: invalid operand for instruction
+
+v_cvt_f32_fp8 v1, v3 byte_sel:1 clamp quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:1 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x90,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
+v_cvt_f32_fp8 v1, v3 byte_sel:2 clamp quad_perm:[3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:2 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x88,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
v_cvt_pk_f16_bf8 v1, v128.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1
// GFX1250: v_cvt_pk_f16_bf8_e64_dpp v1, v128.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x04,0xff]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
index a8c40a7328363..e2c9bc4387138 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8-fake16.s
@@ -62,6 +62,18 @@ v_cvt_f16_fp8 v1, v2 op_sel:[0,1] byte_sel:3 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f16_fp8_e64_dpp v1, v2 op_sel:[0,1] byte_sel:3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x58,0xf7,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_cvt_f32_fp8 v1, v3 clamp dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x80,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:22: error: invalid operand for instruction
+
+v_cvt_f32_fp8 v1, v3 byte_sel:1 clamp dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:1 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x90,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
+v_cvt_f32_fp8 v1, v3 byte_sel:2 clamp dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:2 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x88,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
v_cvt_pk_f16_bf8 v1, v128 dpp8:[7,6,5,4,3,2,1,0] fi:1
// GFX1250: v_cvt_pk_f16_bf8_e64_dpp v1, v128 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xea,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
index 5cd63a7d2a3ab..2ea30aeb38fc1 100644
--- a/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx1250_asm_vop3_from_vop1_dpp8.s
@@ -74,6 +74,18 @@ v_cvt_f16_fp8 v128.l, v2 dpp8:[7,6,5,4,3,2,1,0]
// GFX1250: v_cvt_f16_fp8_e64_dpp v128.l, v2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x80,0x00,0xf7,0xd5,0xe9,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
+v_cvt_f32_fp8 v1, v3 clamp dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x80,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:22: error: invalid operand for instruction
+
+v_cvt_f32_fp8 v1, v3 byte_sel:1 clamp dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:1 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x90,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
+v_cvt_f32_fp8 v1, v3 byte_sel:2 clamp dpp8:[7,6,5,4,3,2,1,0]
+// GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:2 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x88,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+// GFX12-ERR: :[[@LINE-2]]:39: error: not a valid operand.
+
v_cvt_pk_f16_bf8 v1, v128.l dpp8:[7,6,5,4,3,2,1,0] fi:1
// GFX1250: v_cvt_pk_f16_bf8_e64_dpp v1, v128.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xea,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
// GFX12-ERR: :[[@LINE-2]]:1: error: instruction not supported on this GPU
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt
index 622c57a20f860..dd472d58893f5 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1.txt
@@ -107,3 +107,12 @@
0x02,0xeb,0x02,0x7e
# GFX1250-REAL16: v_cvt_pk_f16_fp8 v1, v2.l ; encoding: [0x02,0xeb,0x02,0x7e]
# GFX1250-FAKE16: v_cvt_pk_f16_fp8 v1, v2 ; encoding: [0x02,0xeb,0x02,0x7e]
+
+0x03,0xd8,0x02,0x7e
+# GFX1250: v_cvt_f32_fp8_e32 v1, s3 ; encoding: [0x03,0xd8,0x02,0x7e]
+
+0x83,0xd8,0x02,0x7e
+# GFX1250: v_cvt_f32_fp8_e32 v1, 3 ; encoding: [0x83,0xd8,0x02,0x7e]
+
+0x03,0xd9,0x02,0x7e
+# GFX1250: v_cvt_f32_fp8_e32 v1, v3 ; encoding: [0x03,0xd9,0x02,0x7e]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt
index 149a054742ded..9656dcdeddae3 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop1_dpp16.txt
@@ -69,6 +69,12 @@
0xfa,0xee,0x02,0x7f,0x02,0xe4,0x00,0xff
# GFX1250-REAL16: v_cvt_f16_fp8_dpp v1.h, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0xfa,0xee,0x02,0x7f,0x02,0xe4,0x00,0xff]
+0xfa,0xd8,0x02,0x7e,0x03,0xe4,0x00,0xac
+# GFX1250: v_cvt_f32_fp8_dpp v1, v3 quad_perm:[0,1,2,3] row_mask:0xa bank_mask:0xc ; encoding: [0xfa,0xd8,0x02,0x7e,0x03,0xe4,0x00,0xac]
+
+0xfa,0xd8,0x02,0x7e,0x03,0x1b,0x00,0x2e
+# GFX1250: v_cvt_f32_fp8_dpp v1, v3 quad_perm:[3,2,1,0] row_mask:0x2 bank_mask:0xe ; encoding: [0xfa,0xd8,0x02,0x7e,0x03,0x1b,0x00,0x2e]
+
0xfa,0xec,0x02,0x7e,0x02,0xe4,0x04,0xff
# GFX1250-REAL16: v_cvt_pk_f16_bf8_dpp v1, v2.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xec,0x02,0x7e,0x02,0xe4,0x04,0xff]
# GFX1250-FAKE16: v_cvt_pk_f16_bf8_dpp v1, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0xfa,0xec,0x02,0x7e,0x02,0xe4,0x04,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
index be37aafb04024..ae98cf2e0ff58 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1.txt
@@ -2,6 +2,51 @@
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=+real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250,GFX1250-REAL16 %s
# RUN: llvm-mc -triple=amdgcn -mcpu=gfx1250 -mattr=-real-true16 -disassemble -show-encoding < %s | FileCheck -check-prefixes=GFX1250,GFX1250-FAKE16 %s
+0x01,0x00,0xec,0xd5,0x83,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, 3 ; encoding: [0x01,0x00,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+0x01,0x10,0xec,0xd5,0x83,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+0x01,0x08,0xec,0xd5,0x83,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+0x01,0x18,0xec,0xd5,0x83,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, 3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x83,0x00,0x00,0x00]
+
+0x01,0x00,0xec,0xd5,0x03,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, s3 ; encoding: [0x01,0x00,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+0x01,0x10,0xec,0xd5,0x03,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+0x01,0x08,0xec,0xd5,0x03,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+0x01,0x18,0xec,0xd5,0x03,0x00,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, s3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x03,0x00,0x00,0x00]
+
+0x01,0x00,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 ; encoding: [0x01,0x00,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+0x01,0x10,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:1 ; encoding: [0x01,0x10,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+0x01,0x08,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:2 ; encoding: [0x01,0x08,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+0x01,0x18,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:3 ; encoding: [0x01,0x18,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+0x01,0x80,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 clamp ; encoding: [0x01,0x80,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+0x01,0x90,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:1 clamp ; encoding: [0x01,0x90,0xec,0xd5,0x03,0x01,0x00,0x00]
+
+0x01,0x88,0xec,0xd5,0x03,0x01,0x00,0x00
+# GFX1250: v_cvt_f32_fp8_e64 v1, v3 byte_sel:2 clamp ; encoding: [0x01,0x88,0xec,0xd5,0x03,0x01,0x00,0x00]
+
0x05,0x00,0xf2,0xd5,0xc1,0x00,0x00,0x00
# GFX1250: v_cvt_f32_bf16_e64 v5, -1 ; encoding: [0x05,0x00,0xf2,0xd5,0xc1,0x00,0x00,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
index dedb25599eea8..eadd315ea411b 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp16.txt
@@ -98,6 +98,15 @@
# GFX1250-REAL16: v_cvt_f16_fp8_e64_dpp v128.l, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x80,0x00,0xf7,0xd5,0xfa,0x00,0x00,0x00,0x02,0xe4,0x00,0xff]
# GFX1250-FAKE16: v_cvt_f16_fp8_e64_dpp v128, v2 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x80,0x00,0xf7,0xd5,0xfa,0x00,0x00,0x00,0x02,0xe4,0x00,0xff]
+0x01,0x80,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff
+# GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x80,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+
+0x01,0x90,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff
+# GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:1 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x90,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+
+0x01,0x88,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff
+# GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:2 clamp quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x01,0x88,0xec,0xd5,0xfa,0x00,0x00,0x00,0x03,0x1b,0x00,0xff]
+
0x01,0x00,0xf6,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x04,0xff
# GFX1250-REAL16: v_cvt_pk_f16_bf8_e64_dpp v1, v128.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x04,0xff]
# GFX1250-FAKE16: v_cvt_pk_f16_bf8_e64_dpp v1, v128 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xfa,0x00,0x00,0x00,0x80,0xe4,0x04,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
index 3868abe246e5d..3b2fbe10c61a5 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx1250_dasm_vop3_from_vop1_dpp8.txt
@@ -70,6 +70,15 @@
# GFX1250-REAL16: v_cvt_f16_fp8_e64_dpp v150.l, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf7,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
# GFX1250-FAKE16: v_cvt_f16_fp8_e64_dpp v150, v2 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x96,0x00,0xf7,0xd5,0xea,0x00,0x00,0x00,0x02,0x77,0x39,0x05]
+0x01,0x80,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05
+# GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x80,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+
+0x01,0x90,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05
+# GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:1 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x90,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+
+0x01,0x88,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05
+# GFX1250: v_cvt_f32_fp8_e64_dpp v1, v3 byte_sel:2 clamp dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x01,0x88,0xec,0xd5,0xe9,0x00,0x00,0x00,0x03,0x77,0x39,0x05]
+
0x01,0x00,0xf6,0xd5,0xea,0x00,0x00,0x00,0x80,0x77,0x39,0x05
# GFX1250-REAL16: v_cvt_pk_f16_bf8_e64_dpp v1, v128.l dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xea,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
# GFX1250-FAKE16: v_cvt_pk_f16_bf8_e64_dpp v1, v128 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x01,0x00,0xf6,0xd5,0xea,0x00,0x00,0x00,0x80,0x77,0x39,0x05]
>From f93f8d504a6c1b0d9d7b08061b98a9dbf5ad128f Mon Sep 17 00:00:00 2001
From: John Harrison <harjohn at google.com>
Date: Tue, 8 Jul 2025 13:35:12 -0700
Subject: [PATCH 4/8] [lldb] Improving synchronization of MainLoopWindows.
(#147438)
This improves synchronization between the MainLoopWindows monitor thread
and the main loop state. It uses the `m_ready` and `m_event` event handles
to control when the monitor thread continues, and adds new tests to cover
additional use cases.
I believe this fixes #147291, but it is hard to be certain a race
condition is fixed without running the CI on multiple
machines/configurations.
---------
Co-authored-by: Pavel Labath <pavel at labath.sk>
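For reference, a minimal, self-contained sketch of the two-event handshake
this change establishes. The EventPair wrapper and DataAvailable are
illustrative names, not from the patch; the Win32 calls and the roles of
m_event/m_ready mirror the diff below.

#include <windows.h>

// Two manual-reset events, as in the patch:
//   m_ready: main loop -> monitor thread ("start/resume polling")
//   m_event: monitor thread -> main loop ("data is available")
struct EventPair {
  HANDLE m_event = CreateEventW(NULL, /*bManualReset=*/TRUE,
                                /*bInitialState=*/FALSE, NULL);
  HANDLE m_ready = CreateEventW(NULL, /*bManualReset=*/TRUE,
                                /*bInitialState=*/FALSE, NULL);

  // Main-loop side (WillPoll): arm the monitor only when no data is
  // pending and the monitor is not already waiting.
  void WillPoll() {
    if (WaitForSingleObject(m_event, /*dwMilliseconds=*/0) != WAIT_TIMEOUT)
      return; // data already signalled; consume it before polling again
    if (WaitForSingleObject(m_ready, /*dwMilliseconds=*/0) != WAIT_TIMEOUT)
      return; // monitor thread is already waiting for data
    SetEvent(m_ready);
  }

  // Monitor-thread side, once data is detected: signal m_event *before*
  // clearing m_ready so WillPoll cannot observe both events unset.
  void DataAvailable() {
    SetEvent(m_event);
    ResetEvent(m_ready);
    WaitForSingleObject(m_ready, INFINITE); // block until read is consumed
  }
};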
---
lldb/source/Host/windows/MainLoopWindows.cpp | 33 +++--
lldb/unittests/Host/MainLoopTest.cpp | 141 ++++++++++++++++++-
2 files changed, 165 insertions(+), 9 deletions(-)
diff --git a/lldb/source/Host/windows/MainLoopWindows.cpp b/lldb/source/Host/windows/MainLoopWindows.cpp
index b3322e8b3ae59..a1de895c0ba98 100644
--- a/lldb/source/Host/windows/MainLoopWindows.cpp
+++ b/lldb/source/Host/windows/MainLoopWindows.cpp
@@ -12,16 +12,16 @@
#include "lldb/Host/windows/windows.h"
#include "lldb/Utility/Status.h"
#include "llvm/Config/llvm-config.h"
-#include "llvm/Support/Casting.h"
#include "llvm/Support/WindowsError.h"
#include <algorithm>
#include <cassert>
-#include <cerrno>
-#include <csignal>
#include <ctime>
#include <io.h>
+#include <synchapi.h>
#include <thread>
#include <vector>
+#include <winbase.h>
+#include <winerror.h>
#include <winsock2.h>
using namespace lldb;
@@ -42,11 +42,12 @@ namespace {
class PipeEvent : public MainLoopWindows::IOEvent {
public:
explicit PipeEvent(HANDLE handle)
- : IOEvent(CreateEventW(NULL, /*bManualReset=*/FALSE,
+ : IOEvent(CreateEventW(NULL, /*bManualReset=*/TRUE,
/*bInitialState=*/FALSE, NULL)),
- m_handle(handle), m_ready(CreateEventW(NULL, /*bManualReset=*/FALSE,
+ m_handle(handle), m_ready(CreateEventW(NULL, /*bManualReset=*/TRUE,
/*bInitialState=*/FALSE, NULL)) {
assert(m_event && m_ready);
+ m_monitor_thread = std::thread(&PipeEvent::Monitor, this);
}
~PipeEvent() override {
@@ -65,15 +66,27 @@ class PipeEvent : public MainLoopWindows::IOEvent {
}
void WillPoll() override {
- if (!m_monitor_thread.joinable())
- m_monitor_thread = std::thread(&PipeEvent::Monitor, this);
+ if (WaitForSingleObject(m_event, /*dwMilliseconds=*/0) != WAIT_TIMEOUT) {
+ // The thread has already signalled that the data is available. No need
+ // for further polling until we consume that event.
+ return;
+ }
+ if (WaitForSingleObject(m_ready, /*dwMilliseconds=*/0) != WAIT_TIMEOUT) {
+ // The thread is already waiting for data to become available.
+ return;
+ }
+ // Start waiting.
+ SetEvent(m_ready);
}
- void Disarm() override { SetEvent(m_ready); }
+ void Disarm() override { ResetEvent(m_event); }
/// Monitors the handle performing a zero byte read to determine when data is
/// available.
void Monitor() {
+ // Wait until the MainLoop tells us to start.
+ WaitForSingleObject(m_ready, INFINITE);
+
do {
char buf[1];
DWORD bytes_read = 0;
@@ -110,7 +123,11 @@ class PipeEvent : public MainLoopWindows::IOEvent {
continue;
}
+ // Notify that data is available on the pipe. It's important to set this
+ // before clearing m_ready to avoid a race with WillPoll.
SetEvent(m_event);
+ // Stop polling until we're told to resume.
+ ResetEvent(m_ready);
// Wait until the current read is consumed before doing the next read.
WaitForSingleObject(m_ready, INFINITE);
diff --git a/lldb/unittests/Host/MainLoopTest.cpp b/lldb/unittests/Host/MainLoopTest.cpp
index 502028ae1a343..30585d12fe81d 100644
--- a/lldb/unittests/Host/MainLoopTest.cpp
+++ b/lldb/unittests/Host/MainLoopTest.cpp
@@ -10,6 +10,7 @@
#include "TestingSupport/SubsystemRAII.h"
#include "lldb/Host/ConnectionFileDescriptor.h"
#include "lldb/Host/FileSystem.h"
+#include "lldb/Host/MainLoopBase.h"
#include "lldb/Host/PseudoTerminal.h"
#include "lldb/Host/common/TCPSocket.h"
#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
@@ -64,7 +65,7 @@ class MainLoopTest : public testing::Test {
};
} // namespace
-TEST_F(MainLoopTest, ReadObject) {
+TEST_F(MainLoopTest, ReadSocketObject) {
char X = 'X';
size_t len = sizeof(X);
ASSERT_TRUE(socketpair[0]->Write(&X, len).Success());
@@ -101,6 +102,144 @@ TEST_F(MainLoopTest, ReadPipeObject) {
ASSERT_EQ(1u, callback_count);
}
+TEST_F(MainLoopTest, MultipleReadsPipeObject) {
+ Pipe pipe;
+
+ ASSERT_TRUE(pipe.CreateNew().Success());
+
+ MainLoop loop;
+
+ std::future<void> async_writer = std::async(std::launch::async, [&] {
+ for (int i = 0; i < 5; ++i) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(500));
+ char X = 'X';
+ size_t len = sizeof(X);
+ ASSERT_THAT_EXPECTED(pipe.Write(&X, len), llvm::HasValue(1));
+ }
+ });
+
+ Status error;
+ lldb::FileSP file = std::make_shared<NativeFile>(
+ pipe.GetReadFileDescriptor(), File::eOpenOptionReadOnly, false);
+ auto handle = loop.RegisterReadObject(
+ file,
+ [&](MainLoopBase &loop) {
+ callback_count++;
+ if (callback_count == 5)
+ loop.RequestTermination();
+
+ // Read some data to ensure the handle is not in a readable state.
+ char buf[1024] = {0};
+ size_t len = sizeof(buf);
+ ASSERT_THAT_ERROR(file->Read(buf, len).ToError(), llvm::Succeeded());
+ EXPECT_EQ(len, 1);
+ EXPECT_EQ(buf[0], 'X');
+ },
+ error);
+ ASSERT_TRUE(error.Success());
+ ASSERT_TRUE(handle);
+ ASSERT_TRUE(loop.Run().Success());
+ ASSERT_EQ(5u, callback_count);
+ async_writer.wait();
+}
+
+TEST_F(MainLoopTest, PipeDelayBetweenRegisterAndRun) {
+ Pipe pipe;
+
+ ASSERT_TRUE(pipe.CreateNew().Success());
+
+ MainLoop loop;
+
+ Status error;
+ lldb::FileSP file = std::make_shared<NativeFile>(
+ pipe.GetReadFileDescriptor(), File::eOpenOptionReadOnly, false);
+ auto handle = loop.RegisterReadObject(
+ file,
+ [&](MainLoopBase &loop) {
+ callback_count++;
+
+ // Read some data to ensure the handle is not in a readable state.
+ char buf[1024] = {0};
+ size_t len = sizeof(buf);
+ ASSERT_THAT_ERROR(file->Read(buf, len).ToError(), llvm::Succeeded());
+ EXPECT_EQ(len, 2);
+ EXPECT_EQ(buf[0], 'X');
+ EXPECT_EQ(buf[1], 'X');
+ },
+ error);
+ auto cb = [&](MainLoopBase &) {
+ callback_count++;
+ char X = 'X';
+ size_t len = sizeof(X);
+ // Write twice and ensure we coalesce into a single read.
+ ASSERT_THAT_EXPECTED(pipe.Write(&X, len), llvm::HasValue(1));
+ ASSERT_THAT_EXPECTED(pipe.Write(&X, len), llvm::HasValue(1));
+ };
+ // Add a write that triggers a read event.
+ loop.AddCallback(cb, std::chrono::milliseconds(500));
+ loop.AddCallback([](MainLoopBase &loop) { loop.RequestTermination(); },
+ std::chrono::milliseconds(1000));
+ ASSERT_TRUE(error.Success());
+ ASSERT_TRUE(handle);
+
+ // A write between RegisterReadObject and Run should NOT invoke the callback.
+ cb(loop);
+ ASSERT_EQ(1u, callback_count);
+
+ ASSERT_TRUE(loop.Run().Success());
+ ASSERT_EQ(4u, callback_count);
+}
+
+TEST_F(MainLoopTest, NoSelfTriggersDuringPipeHandler) {
+ Pipe pipe;
+
+ ASSERT_TRUE(pipe.CreateNew().Success());
+
+ MainLoop loop;
+
+ Status error;
+ lldb::FileSP file = std::make_shared<NativeFile>(
+ pipe.GetReadFileDescriptor(), File::eOpenOptionReadOnly, false);
+ auto handle = loop.RegisterReadObject(
+ file,
+ [&](MainLoopBase &lop) {
+ callback_count++;
+
+ char X = 'Y';
+ size_t len = sizeof(X);
+ // Writes/reads during the handler callback should NOT re-trigger it.
+ ASSERT_THAT_EXPECTED(pipe.Write(&X, len), llvm::HasValue(1));
+
+ char buf[1024] = {0};
+ len = sizeof(buf);
+ ASSERT_THAT_ERROR(file->Read(buf, len).ToError(), llvm::Succeeded());
+ EXPECT_EQ(len, 2);
+ EXPECT_EQ(buf[0], 'X');
+ EXPECT_EQ(buf[1], 'Y');
+
+ if (callback_count == 2)
+ loop.RequestTermination();
+ },
+ error);
+ // Add a write that triggers a read event.
+ loop.AddPendingCallback([&](MainLoopBase &) {
+ char X = 'X';
+ size_t len = sizeof(X);
+ ASSERT_THAT_EXPECTED(pipe.Write(&X, len), llvm::HasValue(1));
+ });
+ loop.AddCallback(
+ [&](MainLoopBase &) {
+ char X = 'X';
+ size_t len = sizeof(X);
+ ASSERT_THAT_EXPECTED(pipe.Write(&X, len), llvm::HasValue(1));
+ },
+ std::chrono::milliseconds(500));
+ ASSERT_TRUE(error.Success());
+ ASSERT_TRUE(handle);
+ ASSERT_TRUE(loop.Run().Success());
+ ASSERT_EQ(2u, callback_count);
+}
+
TEST_F(MainLoopTest, NoSpuriousPipeReads) {
Pipe pipe;
>From b13d9c448d963ee1490f11c64e796955dd5ef495 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Tue, 8 Jul 2025 21:38:44 +0100
Subject: [PATCH 5/8] [DAG] SDPatternMatch - add matching for SELECT_CC
patterns to min/max like matchers (#147071)
Fixes #147083
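A hedged sketch of what the matchers now accept, assuming an in-scope
SelectionDAG; matchesSMax, A, and B are illustrative names, not from the
patch.

#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
using namespace llvm::SDPatternMatch;

// select_cc (A, B, A, B, setgt) picks A when A > B, i.e. smax(A, B), so
// m_SMaxLike now matches it just like the SELECT-of-SETCC form.
bool matchesSMax(SelectionDAG &DAG, const SDLoc &DL, SDValue A, SDValue B) {
  SDValue N = DAG.getSelectCC(DL, A, B, A, B, ISD::SETGT);
  SDValue X, Y;
  return sd_match(N, m_SMaxLike(m_Value(X), m_Value(Y)));
}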
---
llvm/include/llvm/CodeGen/SDPatternMatch.h | 42 ++++++++++++-------
.../CodeGen/SelectionDAGPatternMatchTest.cpp | 33 +++++++++++++++
2 files changed, 61 insertions(+), 14 deletions(-)
diff --git a/llvm/include/llvm/CodeGen/SDPatternMatch.h b/llvm/include/llvm/CodeGen/SDPatternMatch.h
index 7c5cdbbeb0ca8..2967532226197 100644
--- a/llvm/include/llvm/CodeGen/SDPatternMatch.h
+++ b/llvm/include/llvm/CodeGen/SDPatternMatch.h
@@ -655,6 +655,21 @@ struct MaxMin_match {
template <typename MatchContext>
bool match(const MatchContext &Ctx, SDValue N) {
+ auto MatchMinMax = [&](SDValue L, SDValue R, SDValue TrueValue,
+ SDValue FalseValue, ISD::CondCode CC) {
+ if ((TrueValue != L || FalseValue != R) &&
+ (TrueValue != R || FalseValue != L))
+ return false;
+
+ ISD::CondCode Cond =
+ TrueValue == L ? CC : getSetCCInverse(CC, L.getValueType());
+ if (!Pred_t::match(Cond))
+ return false;
+
+ return (LHS.match(Ctx, L) && RHS.match(Ctx, R)) ||
+ (Commutable && LHS.match(Ctx, R) && RHS.match(Ctx, L));
+ };
+
if (sd_context_match(N, Ctx, m_Opc(ISD::SELECT)) ||
sd_context_match(N, Ctx, m_Opc(ISD::VSELECT))) {
EffectiveOperands<ExcludeChain> EO_SELECT(N, Ctx);
@@ -670,23 +685,22 @@ struct MaxMin_match {
SDValue R = Cond->getOperand(EO_SETCC.FirstIndex + 1);
auto *CondNode =
cast<CondCodeSDNode>(Cond->getOperand(EO_SETCC.FirstIndex + 2));
-
- if ((TrueValue != L || FalseValue != R) &&
- (TrueValue != R || FalseValue != L)) {
- return false;
- }
-
- ISD::CondCode Cond =
- TrueValue == L ? CondNode->get()
- : getSetCCInverse(CondNode->get(), L.getValueType());
- if (!Pred_t::match(Cond)) {
- return false;
- }
- return (LHS.match(Ctx, L) && RHS.match(Ctx, R)) ||
- (Commutable && LHS.match(Ctx, R) && RHS.match(Ctx, L));
+ return MatchMinMax(L, R, TrueValue, FalseValue, CondNode->get());
}
}
+ if (sd_context_match(N, Ctx, m_Opc(ISD::SELECT_CC))) {
+ EffectiveOperands<ExcludeChain> EO_SELECT(N, Ctx);
+ assert(EO_SELECT.Size == 5);
+ SDValue L = N->getOperand(EO_SELECT.FirstIndex);
+ SDValue R = N->getOperand(EO_SELECT.FirstIndex + 1);
+ SDValue TrueValue = N->getOperand(EO_SELECT.FirstIndex + 2);
+ SDValue FalseValue = N->getOperand(EO_SELECT.FirstIndex + 3);
+ auto *CondNode =
+ cast<CondCodeSDNode>(N->getOperand(EO_SELECT.FirstIndex + 4));
+ return MatchMinMax(L, R, TrueValue, FalseValue, CondNode->get());
+ }
+
return false;
}
};
diff --git a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
index 16b6d32825307..1d1601ab6fba5 100644
--- a/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
+++ b/llvm/unittests/CodeGen/SelectionDAGPatternMatchTest.cpp
@@ -307,6 +307,23 @@ TEST_F(SelectionDAGPatternMatchTest, matchBinaryOp) {
SDValue UMinLikeULT = DAG->getSelect(DL, MVT::i32, ICMP_ULT, Op0, Op1);
SDValue UMinLikeULE = DAG->getSelect(DL, MVT::i32, ICMP_ULE, Op0, Op1);
+ SDValue CCSMaxLikeGT = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETGT);
+ SDValue CCSMaxLikeGE = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETGE);
+ SDValue CCSMaxLikeLT = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETLT);
+ SDValue CCSMaxLikeLE = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETLE);
+ SDValue CCUMaxLikeUGT = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETUGT);
+ SDValue CCUMaxLikeUGE = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETUGE);
+ SDValue CCUMaxLikeULT = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETULT);
+ SDValue CCUMaxLikeULE = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETULE);
+ SDValue CCSMinLikeLT = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETLT);
+ SDValue CCSMinLikeGT = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETGT);
+ SDValue CCSMinLikeLE = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETLE);
+ SDValue CCSMinLikeGE = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETGE);
+ SDValue CCUMinLikeULT = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETULT);
+ SDValue CCUMinLikeUGT = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETUGT);
+ SDValue CCUMinLikeULE = DAG->getSelectCC(DL, Op0, Op1, Op0, Op1, ISD::SETULE);
+ SDValue CCUMinLikeUGE = DAG->getSelectCC(DL, Op0, Op1, Op1, Op0, ISD::SETUGE);
+
SDValue SFAdd = DAG->getNode(ISD::STRICT_FADD, DL, {Float32VT, MVT::Other},
{DAG->getEntryNode(), Op2, Op2});
@@ -357,21 +374,37 @@ TEST_F(SelectionDAGPatternMatchTest, matchBinaryOp) {
EXPECT_TRUE(sd_match(SMax, m_SMaxLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMaxLikeGT, m_SMaxLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMaxLikeGE, m_SMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMaxLikeGT, m_SMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMaxLikeGE, m_SMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMaxLikeLT, m_SMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMaxLikeLE, m_SMaxLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMin, m_c_BinOp(ISD::SMIN, m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMin, m_SMin(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMin, m_SMinLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMinLikeLT, m_SMinLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(SMinLikeLE, m_SMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMinLikeGT, m_SMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMinLikeGE, m_SMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMinLikeLT, m_SMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCSMinLikeLE, m_SMinLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMax, m_c_BinOp(ISD::UMAX, m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMax, m_UMax(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMax, m_UMaxLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMaxLikeUGT, m_UMaxLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMaxLikeUGE, m_UMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMaxLikeUGT, m_UMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMaxLikeUGE, m_UMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMaxLikeULT, m_UMaxLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMaxLikeULE, m_UMaxLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMin, m_c_BinOp(ISD::UMIN, m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMin, m_UMin(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMin, m_UMinLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMinLikeULT, m_UMinLike(m_Value(), m_Value())));
EXPECT_TRUE(sd_match(UMinLikeULE, m_UMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMinLikeUGT, m_UMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMinLikeUGE, m_UMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMinLikeULT, m_UMinLike(m_Value(), m_Value())));
+ EXPECT_TRUE(sd_match(CCUMinLikeULE, m_UMinLike(m_Value(), m_Value())));
// By default, it matches any of the results.
EXPECT_TRUE(
>From 08ba72a92533de089800d0ae3d163c201308d91c Mon Sep 17 00:00:00 2001
From: Florian Hahn <flo at fhahn.com>
Date: Tue, 8 Jul 2025 22:43:51 +0200
Subject: [PATCH 6/8] [EarlyCSE,TTI] Don't create new, unused, instructions.
(#134534)
getOrCreateResultFromMemIntrinsic can modify the current function by
inserting new instructions without EarlyCSE keeping track of the
changes.
Introduce a new CanCreate argument and update the function to create new
instructions only when CanCreate is true. Pass CanCreate=false where
EarlyCSE is only checking for an existing result.
Fixes https://github.com/llvm/llvm-project/issues/145183
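A hedged sketch of the query-then-create pattern this introduces in
EarlyCSE; the wrapper function is illustrative, but the signature matches
the TTI hook as changed by this patch.

#include "llvm/Analysis/TargetTransformInfo.h"
using namespace llvm;

Value *getResultCheckFirst(const TargetTransformInfo &TTI, IntrinsicInst *II,
                           Type *ExpectedType, bool WillUseResult) {
  // First pass: only look for an existing result; never mutate the IR.
  Value *Result =
      TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType,
                                            /*CanCreate=*/false);
  // Create extraction instructions only when the result will be consumed.
  if (!Result && WillUseResult)
    Result = TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType,
                                                   /*CanCreate=*/true);
  return Result;
}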
---
.../llvm/Analysis/TargetTransformInfo.h | 13 +++++++------
.../llvm/Analysis/TargetTransformInfoImpl.h | 5 +++--
llvm/lib/Analysis/TargetTransformInfo.cpp | 5 +++--
.../AArch64/AArch64TargetTransformInfo.cpp | 8 ++++----
.../AArch64/AArch64TargetTransformInfo.h | 5 +++--
llvm/lib/Transforms/Scalar/EarlyCSE.cpp | 15 +++++++++------
.../Transforms/EarlyCSE/AArch64/intrinsics.ll | 18 +++++++++++++++++-
7 files changed, 46 insertions(+), 23 deletions(-)
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index c43870392361d..98b793aace7a3 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -1702,12 +1702,13 @@ class TargetTransformInfo {
/// unordered-atomic memory intrinsic.
LLVM_ABI unsigned getAtomicMemIntrinsicMaxElementSize() const;
- /// \returns A value which is the result of the given memory intrinsic. New
- /// instructions may be created to extract the result from the given intrinsic
- /// memory operation. Returns nullptr if the target cannot create a result
- /// from the given intrinsic.
- LLVM_ABI Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
- Type *ExpectedType) const;
+ /// \returns A value which is the result of the given memory intrinsic. If \p
+ /// CanCreate is true, new instructions may be created to extract the result
+ /// from the given intrinsic memory operation. Returns nullptr if the target
+ /// cannot create a result from the given intrinsic.
+ LLVM_ABI Value *
+ getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType,
+ bool CanCreate = true) const;
/// \returns The type to use in a loop expansion of a memcpy call.
LLVM_ABI Type *getMemcpyLoopLoweringType(
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 12f87226c5f57..ddc8a5eaffa94 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -983,8 +983,9 @@ class TargetTransformInfoImplBase {
return 0;
}
- virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
- Type *ExpectedType) const {
+ virtual Value *
+ getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType,
+ bool CanCreate = true) const {
return nullptr;
}
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 3ebd9d487ba04..8a470ebf85a16 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1294,8 +1294,9 @@ unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const {
}
Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
- IntrinsicInst *Inst, Type *ExpectedType) const {
- return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
+ IntrinsicInst *Inst, Type *ExpectedType, bool CanCreate) const {
+ return TTIImpl->getOrCreateResultFromMemIntrinsic(Inst, ExpectedType,
+ CanCreate);
}
Type *TargetTransformInfo::getMemcpyLoopLoweringType(
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 380faa6cf6939..adc905384bf53 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -4967,9 +4967,9 @@ void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
BaseT::getPeelingPreferences(L, SE, PP);
}
-Value *
-AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
- Type *ExpectedType) const {
+Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType,
+ bool CanCreate) const {
switch (Inst->getIntrinsicID()) {
default:
return nullptr;
@@ -4978,7 +4978,7 @@ AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
case Intrinsic::aarch64_neon_st4: {
// Create a struct type
StructType *ST = dyn_cast<StructType>(ExpectedType);
- if (!ST)
+ if (!CanCreate || !ST)
return nullptr;
unsigned NumElts = Inst->arg_size() - 1;
if (ST->getNumElements() != NumElts)
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 9ada70bd7086a..ff0ab68a16a88 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -270,8 +270,9 @@ class AArch64TTIImpl final : public BasicTTIImplBase<AArch64TTIImpl> {
void getPeelingPreferences(Loop *L, ScalarEvolution &SE,
TTI::PeelingPreferences &PP) const override;
- Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
- Type *ExpectedType) const override;
+ Value *
+ getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst, Type *ExpectedType,
+ bool CanCreate = true) const override;
bool getTgtMemIntrinsic(IntrinsicInst *Inst,
MemIntrinsicInfo &Info) const override;
diff --git a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
index b6cb987c0423f..0f8cc6ca6ed21 100644
--- a/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/llvm/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -958,7 +958,8 @@ class EarlyCSE {
bool overridingStores(const ParseMemoryInst &Earlier,
const ParseMemoryInst &Later);
- Value *getOrCreateResult(Instruction *Inst, Type *ExpectedType) const {
+ Value *getOrCreateResult(Instruction *Inst, Type *ExpectedType,
+ bool CanCreate) const {
// TODO: We could insert relevant casts on type mismatch.
// The load or the store's first operand.
Value *V;
@@ -971,7 +972,8 @@ class EarlyCSE {
V = II->getOperand(0);
break;
default:
- return TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType);
+ return TTI.getOrCreateResultFromMemIntrinsic(II, ExpectedType,
+ CanCreate);
}
} else {
V = isa<LoadInst>(Inst) ? Inst : cast<StoreInst>(Inst)->getValueOperand();
@@ -1255,9 +1257,10 @@ Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
// For stores check the result values before checking memory generation
// (otherwise isSameMemGeneration may crash).
- Value *Result = MemInst.isStore()
- ? getOrCreateResult(Matching, Other->getType())
- : nullptr;
+ Value *Result =
+ MemInst.isStore()
+ ? getOrCreateResult(Matching, Other->getType(), /*CanCreate=*/false)
+ : nullptr;
if (MemInst.isStore() && InVal.DefInst != Result)
return nullptr;
@@ -1278,7 +1281,7 @@ Value *EarlyCSE::getMatchingValue(LoadValue &InVal, ParseMemoryInst &MemInst,
return nullptr;
if (!Result)
- Result = getOrCreateResult(Matching, Other->getType());
+ Result = getOrCreateResult(Matching, Other->getType(), /*CanCreate=*/true);
return Result;
}
diff --git a/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll b/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
index 94b17510bb95d..826da89290691 100644
--- a/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
+++ b/llvm/test/Transforms/EarlyCSE/AArch64/intrinsics.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -passes=early-cse -earlycse-debug-hash | FileCheck %s
-; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -aa-pipeline=basic-aa -passes='early-cse<memssa>' | FileCheck %s
+; RUN: opt < %s -S -mtriple=aarch64-none-linux-gnu -mattr=+neon -aa-pipeline=basic-aa -passes='early-cse<memssa>' -verify-analysis-invalidation | FileCheck %s
define <4 x i32> @test_cse(ptr %a, [2 x <4 x i32>] %s.coerce, i32 %n) {
; CHECK-LABEL: define <4 x i32> @test_cse(
@@ -324,6 +324,22 @@ for.end: ; preds = %for.cond
ret <4 x i32> %res.0
}
+define void @test_ld4_st4_no_cse(ptr %p, <16 x i8> %A, <16 x i8> %B) {
+; CHECK-LABEL: define void @test_ld4_st4_no_cse(
+; CHECK-SAME: ptr [[P:%.*]], <16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[LD:%.*]] = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr [[P]])
+; CHECK-NEXT: [[EXT:%.*]] = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } [[LD]], 0
+; CHECK-NEXT: tail call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> [[EXT]], <16 x i8> [[A]], <16 x i8> [[B]], <16 x i8> zeroinitializer, ptr [[P]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %ld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0(ptr %p)
+ %ext = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %ld, 0
+ tail call void @llvm.aarch64.neon.st4.v16i8.p0(<16 x i8> %ext, <16 x i8> %A, <16 x i8> %B, <16 x i8> zeroinitializer, ptr %p)
+ ret void
+}
+
; Function Attrs: nounwind
declare void @llvm.aarch64.neon.st2.v4i32.p0(<4 x i32>, <4 x i32>, ptr nocapture)
>From df3e7fd2b145855a18733f591cd3359ecf698aa6 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Tue, 8 Jul 2025 13:54:11 -0700
Subject: [PATCH 7/8] [RISCV][IR] Implement verifier check for
llvm.experimental.vp.splice immediate. (#147458)
This applies the same check as llvm.vector.splice: the immediate must be in the range [-VL, VL-1], where VL is the minimum vector length. If vscale_range is available, its lower bound is used to increase the known minimum vector length for this check. This ensures the immediate is in range for any possible value of vscale that satisfies the vscale_range.
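A hedged sketch of the bound computation, simplified from the new code in
Verifier::visitVPIntrinsic; the function and parameter names are
illustrative.

#include <cstdint>

bool isValidVPSpliceIndex(int64_t Idx, int64_t KnownMinNumElements,
                          unsigned VScaleRangeMin /* 0 if absent */) {
  // Tighten the known minimum using the lower bound of vscale_range.
  if (VScaleRangeMin != 0)
    KnownMinNumElements *= VScaleRangeMin;
  // Valid range is [-VL, VL-1].
  return (Idx < 0 && -Idx <= KnownMinNumElements) ||
         (Idx >= 0 && Idx < KnownMinNumElements);
}

For example, <vscale x 2 x i1> under vscale_range(2,0) has a known minimum
of 2 * 2 = 4 elements, so valid immediates are [-4, 3]; this is why the
tests below replace the out-of-range 5 and -5 immediates with values such
as 3 and -4.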
---
llvm/lib/IR/Verifier.cpp | 30 ++-
.../RISCV/rvv/vp-splice-mask-vectors.ll | 84 +++----
llvm/test/CodeGen/RISCV/rvv/vp-splice.ll | 212 +++++++++---------
llvm/test/Verifier/invalid-vp-intrinsics.ll | 33 +++
4 files changed, 210 insertions(+), 149 deletions(-)
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 227afe2b7b61b..eb747bc48a8a5 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6939,20 +6939,44 @@ void Verifier::visitVPIntrinsic(VPIntrinsic &VPI) {
break;
}
}
- if (VPI.getIntrinsicID() == Intrinsic::vp_fcmp) {
+
+ switch (VPI.getIntrinsicID()) {
+ case Intrinsic::vp_fcmp: {
auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
Check(CmpInst::isFPPredicate(Pred),
"invalid predicate for VP FP comparison intrinsic", &VPI);
+ break;
}
- if (VPI.getIntrinsicID() == Intrinsic::vp_icmp) {
+ case Intrinsic::vp_icmp: {
auto Pred = cast<VPCmpIntrinsic>(&VPI)->getPredicate();
Check(CmpInst::isIntPredicate(Pred),
"invalid predicate for VP integer comparison intrinsic", &VPI);
+ break;
}
- if (VPI.getIntrinsicID() == Intrinsic::vp_is_fpclass) {
+ case Intrinsic::vp_is_fpclass: {
auto TestMask = cast<ConstantInt>(VPI.getOperand(1));
Check((TestMask->getZExtValue() & ~static_cast<unsigned>(fcAllFlags)) == 0,
"unsupported bits for llvm.vp.is.fpclass test mask");
+ break;
+ }
+ case Intrinsic::experimental_vp_splice: {
+ VectorType *VecTy = cast<VectorType>(VPI.getType());
+ int64_t Idx = cast<ConstantInt>(VPI.getArgOperand(2))->getSExtValue();
+ int64_t KnownMinNumElements = VecTy->getElementCount().getKnownMinValue();
+ if (VPI.getParent() && VPI.getParent()->getParent()) {
+ AttributeList Attrs = VPI.getParent()->getParent()->getAttributes();
+ if (Attrs.hasFnAttr(Attribute::VScaleRange))
+ KnownMinNumElements *= Attrs.getFnAttrs().getVScaleRangeMin();
+ }
+ Check((Idx < 0 && std::abs(Idx) <= KnownMinNumElements) ||
+ (Idx >= 0 && Idx < KnownMinNumElements),
+ "The splice index exceeds the range [-VL, VL-1] where VL is the "
+ "known minimum number of elements in the vector. For scalable "
+ "vectors the minimum number of elements is determined from "
+ "vscale_range.",
+ &VPI);
+ break;
+ }
}
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 3b0b183537468..709269904dbd8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -10,7 +10,7 @@ declare <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i
declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32, <vscale x 32 x i1>, i32, i32)
declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32)
-define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -22,19 +22,19 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vslidedown.vi v9, v9, 1
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vslideup.vx v9, v8, a0
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: ret
- %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 1, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 1 x i1> %v
}
-define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -46,19 +46,19 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e8, mf8, ta, ma
+; CHECK-NEXT: addi a0, a0, -2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vx v9, v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: ret
- %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -2, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 1 x i1> %v
}
-define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -70,20 +70,20 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
; CHECK-NEXT: vmv.v.i v11, 0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v10, v10, 1, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 1, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 1 x i1> %v
}
-define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -95,19 +95,19 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v9, 5
+; CHECK-NEXT: vslidedown.vi v9, v9, 3
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vx v9, v8, a0
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x i1> %v
}
-define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -119,19 +119,19 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmerge.vim v9, v10, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e8, mf4, ta, ma
+; CHECK-NEXT: addi a0, a0, -4
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v9, v9, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 5
+; CHECK-NEXT: vslideup.vi v9, v8, 4
; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -4, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x i1> %v
}
-define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -143,20 +143,20 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
; CHECK-NEXT: vmv.v.i v11, 0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v10, v10, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vmsne.vi v0, v10, 0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x i1> %v
}
-define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -180,7 +180,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x
ret <vscale x 4 x i1> %v
}
-define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -204,7 +204,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1
ret <vscale x 4 x i1> %v
}
-define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -229,7 +229,7 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
ret <vscale x 4 x i1> %v
}
-define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -253,7 +253,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x
ret <vscale x 8 x i1> %v
}
-define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -277,7 +277,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1
ret <vscale x 8 x i1> %v
}
-define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -302,7 +302,7 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
ret <vscale x 8 x i1> %v
}
-define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -326,7 +326,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscal
ret <vscale x 16 x i1> %v
}
-define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -350,7 +350,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x
ret <vscale x 16 x i1> %v
}
-define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -376,7 +376,7 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
ret <vscale x 16 x i1> %v
}
-define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -400,7 +400,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscal
ret <vscale x 32 x i1> %v
}
-define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -424,7 +424,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x
ret <vscale x 32 x i1> %v
}
-define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -450,7 +450,7 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
ret <vscale x 32 x i1> %v
}
-define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -474,7 +474,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscal
ret <vscale x 64 x i1> %v
}
-define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -498,7 +498,7 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x
ret <vscale x 64 x i1> %v
}
-define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -523,3 +523,5 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va,
%v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 64 x i1> %v
}
+
+attributes #0 = { vscale_range(2,0) }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
index 9c8c5da75ff7c..e6a57ae6b1ea5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice.ll
@@ -4,33 +4,33 @@
; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zfh,+zfbfmin,+zvfhmin,+zvfbfmin -verify-machineinstrs \
; RUN: < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
-define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x i64> %v
}
-define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64_negative_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i64_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e64, m2, ta, ma
+; CHECK-NEXT: addi a0, a0, -3
+; CHECK-NEXT: vsetivli zero, 3, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 -3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x i64> %v
}
-define <vscale x 2 x i64> @test_vp_splice_nxv2i64_zero_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64_zero_offset(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i64_zero_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
@@ -40,98 +40,98 @@ define <vscale x 2 x i64> @test_vp_splice_nxv2i64_zero_offset(<vscale x 2 x i64>
ret <vscale x 2 x i64> %v
}
-define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i64> @test_vp_splice_nxv2i64_masked(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i64_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v10, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i64> @llvm.experimental.vp.splice.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x i64> %v
}
-define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i64> @test_vp_splice_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
- %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 1, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 1 x i64> %v
}
-define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i64> @test_vp_splice_nxv1i64_negative_offset(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1i64_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT: addi a0, a0, -2
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
- %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 -2, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 1 x i64> %v
}
-define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x i64> @test_vp_splice_nxv1i64_masked(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1i64_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 1, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 1 x i64> %v
}
-define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x i32> %v
}
-define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_negative_offset(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i32_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: addi a0, a0, -4
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 -4, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x i32> %v
}
-define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_masked(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i32_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x i32> @llvm.experimental.vp.splice.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x i32> %v
}
-define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, -5
@@ -144,7 +144,7 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16(<vscale x 4 x i16> %va, <vscal
ret <vscale x 4 x i16> %v
}
-define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv4i16_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, -5
@@ -157,7 +157,7 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16_negative_offset(<vscale x 4 x
ret <vscale x 4 x i16> %v
}
-define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv4i16_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, -5
@@ -170,7 +170,7 @@ define <vscale x 4 x i16> @test_vp_splice_nxv4i16_masked(<vscale x 4 x i16> %va,
ret <vscale x 4 x i16> %v
}
-define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, -5
@@ -183,7 +183,7 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8(<vscale x 8 x i8> %va, <vscale x
ret <vscale x 8 x i8> %v
}
-define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv8i8_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, -5
@@ -196,7 +196,7 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8_negative_offset(<vscale x 8 x i8
ret <vscale x 8 x i8> %v
}
-define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv8i8_masked:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, -5
@@ -209,85 +209,85 @@ define <vscale x 8 x i8> @test_vp_splice_nxv8i8_masked(<vscale x 8 x i8> %va, <v
ret <vscale x 8 x i8> %v
}
-define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x double> @test_vp_splice_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
- %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 1, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 1 x double> %v
}
-define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x double> @test_vp_splice_nxv1f64_negative_offset(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1f64_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e64, m1, ta, ma
+; CHECK-NEXT: addi a0, a0, -2
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: vslideup.vi v8, v9, 2
; CHECK-NEXT: ret
- %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -5, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 -2, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 1 x double> %v
}
-define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 1 x double> @test_vp_splice_nxv1f64_masked(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv1f64_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 1 x double> @llvm.experimental.vp.splice.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, i32 1, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 1 x double> %v
}
-define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x float> @test_vp_splice_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x float> %v
}
-define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x float> @test_vp_splice_nxv2f32_negative_offset(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f32_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: addi a0, a0, -3
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
- %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 -3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x float> %v
}
-define <vscale x 2 x float> @test_vp_splice_nxv2f32_masked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x float> @test_vp_splice_nxv2f32_masked(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f32_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x float> @llvm.experimental.vp.splice.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x float> %v
}
-define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) nounwind {
+define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv16i64:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a4, vlenb
@@ -355,7 +355,7 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64(<vscale x 16 x i64> %va, <vs
ret <vscale x 16 x i64> %v
}
-define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16 x i64> %va, <vscale x 16 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) nounwind {
+define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16 x i64> %va, <vscale x 16 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv16i64_negative_offset:
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a5, vlenb
@@ -428,85 +428,85 @@ define <vscale x 16 x i64> @test_vp_splice_nxv16i64_negative_offset(<vscale x 16
ret <vscale x 16 x i64> %v
}
-define <vscale x 2 x half> @test_vp_splice_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x half> @test_vp_splice_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x half> %v
}
-define <vscale x 2 x half> @test_vp_splice_nxv2f16_negative_offset(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x half> @test_vp_splice_nxv2f16_negative_offset(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f16_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e16, mf2, ta, ma
+; CHECK-NEXT: addi a0, a0, -3
+; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
- %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 -3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x half> %v
}
-define <vscale x 2 x half> @test_vp_splice_nxv2f16_masked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x half> @test_vp_splice_nxv2f16_masked(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f16_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x half> @llvm.experimental.vp.splice.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x half> %v
}
-define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2bf16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x bfloat> %v
}
-define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_negative_offset(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_negative_offset(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2bf16_negative_offset:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetivli zero, 5, e16, mf2, ta, ma
+; CHECK-NEXT: addi a0, a0, -3
+; CHECK-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
- %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 -3, <vscale x 2 x i1> splat (i1 1), i32 %evla, i32 %evlb)
ret <vscale x 2 x bfloat> %v
}
-define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_masked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_masked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) #0 {
; CHECK-LABEL: test_vp_splice_nxv2bf16_masked:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: addi a0, a0, -3
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vslidedown.vi v8, v8, 3, v0.t
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
+ %v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 3, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
ret <vscale x 2 x bfloat> %v
}
-define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i32_with_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -518,7 +518,7 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_firstelt(i32 %first, <vsc
ret <vscale x 2 x i32> %v
}
-define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_splat_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_splat_firstelt(i32 %first, <vscale x 2 x i32> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) #0 {
; CHECK-LABEL: test_vp_splice_nxv2i32_with_splat_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -531,7 +531,7 @@ define <vscale x 2 x i32> @test_vp_splice_nxv2i32_with_splat_firstelt(i32 %first
ret <vscale x 2 x i32> %v
}
-define <vscale x 2 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+define <vscale x 2 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first, <vscale x 2 x float> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) #0 {
; CHECK-LABEL: test_vp_splice_nxv2f32_with_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -543,7 +543,7 @@ define <vscale x 2 x float> @test_vp_splice_nxv2f32_with_firstelt(float %first,
ret <vscale x 2 x float> %v
}
-define <vscale x 2 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+define <vscale x 2 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <vscale x 2 x half> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) #0 {
; ZVFH-LABEL: test_vp_splice_nxv2f16_with_firstelt:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -563,7 +563,7 @@ define <vscale x 2 x half> @test_vp_splice_nxv2f16_with_firstelt(half %first, <v
ret <vscale x 2 x half> %v
}
-define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_with_firstelt(bfloat %first, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_with_firstelt(bfloat %first, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evl) #0 {
; CHECK-LABEL: test_vp_splice_nxv2bf16_with_firstelt:
; CHECK: # %bb.0:
; CHECK-NEXT: fmv.x.h a1, fa0
@@ -575,3 +575,5 @@ define <vscale x 2 x bfloat> @test_vp_splice_nxv2bf16_with_firstelt(bfloat %firs
%v = call <vscale x 2 x bfloat> @llvm.experimental.vp.splice.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, i32 0, <vscale x 2 x i1> %mask, i32 1, i32 %evl)
ret <vscale x 2 x bfloat> %v
}
+
+attributes #0 = { nounwind vscale_range(2,0) }
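
The new offsets above are not arbitrary: with vscale_range(2,0) the known minimum number of lanes is 2 x (minimum element count), and the verifier rule exercised in llvm/test/Verifier/invalid-vp-intrinsics.ll below requires the splice index to lie in [-VL, VL-1] for that minimum VL. So <vscale x 1 x i64> has at least 2 lanes (legal indices -2..1, hence the 1 and -2 used here), <vscale x 2 x i64> and <vscale x 2 x i32> have at least 4 (legal indices -4..3, hence 3, -3 and -4), while the nxv4i16 and nxv8i8 tests keep offset 5 because their minimums (8 and 16 lanes) already cover it. A minimal sketch of an in-range call under these assumptions (the function name is illustrative, not part of the patch):

define <vscale x 1 x i64> @splice_in_range_sketch(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) vscale_range(2,0) {
  ; vscale >= 2 with 1 element per vscale unit gives at least 2 lanes,
  ; so any immediate splice index in [-2, 1] passes the verifier check.
  %v = call <vscale x 1 x i64> @llvm.experimental.vp.splice.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb, i32 1, <vscale x 1 x i1> splat (i1 1), i32 %evla, i32 %evlb)
  ret <vscale x 1 x i64> %v
}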
diff --git a/llvm/test/Verifier/invalid-vp-intrinsics.ll b/llvm/test/Verifier/invalid-vp-intrinsics.ll
index 08639352c3ea9..9923867757e7a 100644
--- a/llvm/test/Verifier/invalid-vp-intrinsics.ll
+++ b/llvm/test/Verifier/invalid-vp-intrinsics.ll
@@ -33,3 +33,36 @@ define void @test_vp_icmp(<4 x i32> %a, <4 x i32> %b, <4 x i1> %m, i32 %n) {
%r1 = call <4 x i1> @llvm.vp.icmp.v4i32(<4 x i32> %a, <4 x i32> %b, metadata !"oeq", <4 x i1> %m, i32 %n)
ret void
}
+
+; CHECK: The splice index exceeds the range [-VL, VL-1] where VL is the known minimum number of elements in the vector
+define <2 x double> @splice_v2f64_idx_neg3(<2 x double> %a, <2 x double> %b, i32 %evl1, i32 %evl2) #0 {
+ %res = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %a, <2 x double> %b, i32 -3, <2 x i1> splat (i1 1), i32 %evl1, i32 %evl2)
+ ret <2 x double> %res
+}
+
+; CHECK: The splice index exceeds the range [-VL, VL-1] where VL is the known minimum number of elements in the vector
+define <vscale x 2 x double> @splice_nxv2f64_idx_neg3_vscale_min1(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 %evl1, i32 %evl2) #0 {
+ %res = call <vscale x 2 x double> @llvm.experimental.vp.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -3, <vscale x 2 x i1> splat (i1 1), i32 %evl1, i32 %evl2)
+ ret <vscale x 2 x double> %res
+}
+
+; CHECK: The splice index exceeds the range [-VL, VL-1] where VL is the known minimum number of elements in the vector
+define <vscale x 2 x double> @splice_nxv2f64_idx_neg5_vscale_min2(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 %evl1, i32 %evl2) #1 {
+ %res = call <vscale x 2 x double> @llvm.experimental.vp.splice.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, i32 -5, <vscale x 2 x i1> splat (i1 1), i32 %evl1, i32 %evl2)
+ ret <vscale x 2 x double> %res
+}
+
+; CHECK: The splice index exceeds the range [-VL, VL-1] where VL is the known minimum number of elements in the vector
+define <2 x double> @splice_v2f64_idx2(<2 x double> %a, <2 x double> %b, i32 %evl1, i32 %evl2) #0 {
+ %res = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %a, <2 x double> %b, i32 2, <2 x i1> splat (i1 1), i32 %evl1, i32 %evl2)
+ ret <2 x double> %res
+}
+
+; CHECK: The splice index exceeds the range [-VL, VL-1] where VL is the known minimum number of elements in the vector
+define <2 x double> @splice_v2f64_idx3(<2 x double> %a, <2 x double> %b, i32 %evl1, i32 %evl2) #1 {
+ %res = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %a, <2 x double> %b, i32 4, <2 x i1> splat (i1 1), i32 %evl1, i32 %evl2)
+ ret <2 x double> %res
+}
+
+attributes #0 = { vscale_range(1,16) }
+attributes #1 = { vscale_range(2,16) }
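
For reference, each rejected index follows from VL = (minimum vscale) x (minimum element count): <2 x double> always has VL = 2 (legal range [-2, 1], so indices -3, 2 and 4 are out), <vscale x 2 x double> with vscale_range(1,16) has VL = 2 (so -3 is out), and with vscale_range(2,16) has VL = 4 (so -5 falls outside [-4, 3]). A sketch of the in-range counterpart the verifier accepts (illustrative only, not part of the patch):

define <2 x double> @splice_v2f64_idx_ok(<2 x double> %a, <2 x double> %b, i32 %evl1, i32 %evl2) {
  ; VL = 2 for <2 x double>, so index 1 is within [-2, 1] and no error fires.
  %res = call <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double> %a, <2 x double> %b, i32 1, <2 x i1> splat (i1 1), i32 %evl1, i32 %evl2)
  ret <2 x double> %res
}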
>From 1e0b0c99a1008de66cec6dbc4c667ed0e17a8c69 Mon Sep 17 00:00:00 2001
From: Dean Sturtevant <dsturtevant at google.com>
Date: Wed, 9 Jul 2025 09:24:45 -0400
Subject: [PATCH 8/8] [bazel] Update after
db7888ca9aef6c203b363bbb395549b4e6cfa9d4 (#146732)
---
.../llvm-project-overlay/mlir/BUILD.bazel | 37 +++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 93bbe736c59c2..51ed983b36125 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -857,6 +857,7 @@ mlir_c_api_cc_library(
],
)
+
mlir_c_api_cc_library(
name = "CAPIMLProgram",
srcs = [
@@ -11550,6 +11551,42 @@ cc_library(
],
)
+td_library(
+ name = "TransformTuneExtensionTdFiles",
+ srcs = glob(["include/mlir/Dialect/Transform/TuneExtension/*.td"]),
+ deps = [
+ ":BuiltinDialectTdFiles",
+ ":TransformDialectTdFiles",
+ ],
+)
+
+gentbl_cc_library(
+ name = "TransformTuneExtensionOpsIncGen",
+ tbl_outs = {
+ "include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.h.inc": [
+ "-gen-op-decls",
+ ],
+ "include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.cpp.inc": [
+ "-gen-op-defs",
+ ],
+ },
+ tblgen = ":mlir-tblgen",
+ td_file = "include/mlir/Dialect/Transform/TuneExtension/TuneExtensionOps.td",
+ deps = [":TransformTuneExtensionTdFiles"],
+)
+
+cc_library(
+ name = "TransformTuneExtension",
+ srcs = glob(["lib/Dialect/Transform/TuneExtension/*.cpp"]),
+ hdrs = glob(["include/mlir/Dialect/Transform/TuneExtension/*.h"]),
+ deps =[":IR",
+ ":TransformDialect",
+ ":TransformDialectInterfaces",
+ ":TransformOpsIncGen",
+ ":TransformTuneExtensionOpsIncGen",
+ "//llvm:Support"]
+)
+
gentbl_cc_library(
name = "TransformIRDLExtensionOpsIncGen",
tbl_outs = {