[clang] [Clang] Restrict AMDGCN image built-ins (PR #180949)
Alexey Sachkov via cfe-commits
cfe-commits at lists.llvm.org
Tue Feb 17 01:25:34 PST 2026
https://github.com/AlexeySachkov updated https://github.com/llvm/llvm-project/pull/180949
>From 7a11a80141a8ab5a4d3f61ac65ef59fe4dde8110 Mon Sep 17 00:00:00 2001
From: Alexey Sachkov <alexey.sachkov at amd.com>
Date: Wed, 11 Feb 2026 08:06:37 -0600
Subject: [PATCH 1/3] [Clang] Simplify AMDGCN image built-ins
This is a follow-up to llvm/llvm-project#179511 which introduced
validation for the `dmask` argument of the aforementioned built-ins to
match LLVM IR verifier behavior.
Additionally, to simplify the interface, the `dmask` argument was dropped from
almost all built-ins, because it can easily be inferred from the built-in's name and return type.
The only exceptions are gather built-ins, where we do need to pass different
`dmask` values.
---
clang/include/clang/Basic/BuiltinsAMDGPU.td | 220 +++++++++---------
clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 36 ++-
clang/lib/Sema/SemaAMDGPU.cpp | 20 +-
clang/test/CodeGen/builtins-extended-image.c | 148 ++++++------
clang/test/CodeGen/builtins-image-load.c | 168 ++++++-------
clang/test/CodeGen/builtins-image-store.c | 112 ++++-----
...iltins-extended-image-param-gfx1100-err.cl | 95 ++++----
...uiltins-extended-image-param-gfx942-err.cl | 80 +++----
.../builtins-image-load-param-gfx1100-err.cl | 84 +++----
.../builtins-image-load-param-gfx942-err.cl | 96 ++++----
.../builtins-image-store-param-gfx1100-err.cl | 56 ++---
.../builtins-image-store-param-gfx942-err.cl | 56 ++---
.../half-float16-vector-compatibility.cl | 4 +-
13 files changed, 615 insertions(+), 560 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index c1ca7d4fd8e77..0556c3f3d363f 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -921,114 +921,114 @@ def __builtin_amdgcn_cooperative_atomic_store_8x16B : AMDGPUBuiltin<"void(_ExtVe
//===----------------------------------------------------------------------===//
// Image builtins
//===----------------------------------------------------------------------===//
-def __builtin_amdgcn_image_load_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_lz_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_load_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_lz_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
def __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index bff1ed3d2ec19..a0a992ae9503e 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -219,7 +219,8 @@ static llvm::Value *loadTextureDescPtorAsVec8I32(CodeGenFunction &CGF,
llvm::CallInst *
emitAMDGCNImageOverloadedReturnType(clang::CodeGen::CodeGenFunction &CGF,
const clang::CallExpr *E,
- unsigned IntrinsicID, bool IsImageStore) {
+ unsigned IntrinsicID, bool IsImageStore,
+ bool InsertDMaskImplicitArg = true) {
auto findTextureDescIndex = [&CGF](const CallExpr *E) -> unsigned {
QualType TexQT = CGF.getContext().AMDGPUTextureTy;
for (unsigned I = 0, N = E->getNumArgs(); I < N; ++I) {
@@ -243,13 +244,37 @@ emitAMDGCNImageOverloadedReturnType(clang::CodeGen::CodeGenFunction &CGF,
llvm::report_fatal_error("Invalid argument count for image builtin");
}
- for (unsigned I = 0; I < E->getNumArgs(); ++I) {
- llvm::Value *V = CGF.EmitScalarExpr(E->getArg(I));
- if (I == RsrcIndex)
+ auto handleArgument = [&](unsigned Index) -> void {
+ llvm::Value *V = CGF.EmitScalarExpr(E->getArg(Index));
+ if (Index == RsrcIndex)
V = loadTextureDescPtorAsVec8I32(CGF, V);
Args.push_back(V);
+ };
+
+ if (InsertDMaskImplicitArg) {
+ llvm::Type *TyForDMask = IsImageStore
+ ? CGF.ConvertType(E->getArg(0)->getType())
+ : CGF.ConvertType(E->getType());
+ // Implicit DMask is an all-ones mask of N bits where N is the number of
+ // elements in return type.
+ int DMask = 1;
+ if (auto *VTy = dyn_cast<llvm::FixedVectorType>(TyForDMask))
+ DMask = static_cast<int>(VTy->getNumElements());
+ DMask = (1 << DMask) - 1;
+
+    // Store intrinsics have the output argument first and then DMask; all
+    // other intrinsics have DMask as the first argument.
+ if (IsImageStore)
+ handleArgument(0);
+ Args.push_back(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), DMask));
}
+ unsigned StartIndex =
+ static_cast<int>(InsertDMaskImplicitArg && IsImageStore);
+ for (unsigned I = StartIndex; I < E->getNumArgs(); ++I)
+ handleArgument(I);
+
llvm::Type *RetTy = IsImageStore ? CGF.VoidTy : CGF.ConvertType(E->getType());
llvm::CallInst *Call = CGF.Builder.CreateIntrinsic(RetTy, IntrinsicID, Args);
return Call;
@@ -1239,7 +1264,8 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
*this, E, Intrinsic::amdgcn_image_sample_d_2darray, false);
case clang::AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32:
return emitAMDGCNImageOverloadedReturnType(
- *this, E, Intrinsic::amdgcn_image_gather4_lz_2d, false);
+ *this, E, Intrinsic::amdgcn_image_gather4_lz_2d, false,
+ /*InsertDMaskImplicitArg = */ false);
case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
llvm::FixedVectorType *VT = FixedVectorType::get(Builder.getInt32Ty(), 8);
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index c9833f5083d07..acebbf6c3aa8f 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -249,7 +249,22 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
unsigned ArgCount = TheCall->getNumArgs() - 1;
llvm::APSInt Result;
- return (SemaRef.BuiltinConstantArg(TheCall, 0, Result)) ||
+ bool ExtraGatherChecks = false;
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32) {
+ // Gather has the DMask argument.
+ const Type *RetTy = TheCall->getType().getTypePtr();
+ constexpr int Low = 1;
+ int High = 1;
+ if (auto *VTy = dyn_cast<VectorType>(RetTy))
+ High = VTy->getNumElements();
+    // Which cannot accept values that have more bits set than there are
+    // elements in the return type.
+ High = (1 << High) - 1;
+ ExtraGatherChecks |= SemaRef.BuiltinConstantArgRange(
+ TheCall, 0, Low, High, /* RangeIsError = */ true);
+ }
+
+ return ExtraGatherChecks ||
(SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
(SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
}
@@ -293,8 +308,7 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
unsigned ArgCount = TheCall->getNumArgs() - 1;
llvm::APSInt Result;
- return (SemaRef.BuiltinConstantArg(TheCall, 1, Result)) ||
- (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
+ return (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
(SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
}
case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8:
diff --git a/clang/test/CodeGen/builtins-extended-image.c b/clang/test/CodeGen/builtins-extended-image.c
index 491bbcf7d5412..44a757b75e3a2 100644
--- a/clang/test/CodeGen/builtins-extended-image.c
+++ b/clang/test/CodeGen/builtins-extended-image.c
@@ -151,12 +151,12 @@ float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
// CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP3]]
//
float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(100, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_1d_v4f32_f32(
@@ -182,12 +182,12 @@ float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_1d_v4f32_f32(
@@ -214,12 +214,12 @@ float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1d.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_2d_v4f32_f32(
@@ -245,12 +245,12 @@ float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_2d_v4f32_f32(
@@ -277,12 +277,12 @@ float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32.v8i32.v4i32(i32 10, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(10, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_2d_v4f32_f32(
@@ -312,12 +312,12 @@ float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP6]], align 32
// CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP8:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2d.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP8]]
//
float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_3d_v4f32_f32(
// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
@@ -343,12 +343,12 @@ float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.3d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.3d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_3d_v4f32_f32(
@@ -376,12 +376,12 @@ float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.3d.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.3d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_3d_v4f32_f32(
@@ -414,12 +414,12 @@ float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
// CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP11]]
//
float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_cube_v4f32_f32(
@@ -446,12 +446,12 @@ float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.cube.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.cube.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_cube_v4f32_f32(
@@ -479,12 +479,12 @@ float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.cube.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.cube.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_1darray_v4f32_f32(
@@ -510,12 +510,12 @@ float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i3
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_1darray_v4f32_f32(
@@ -542,12 +542,12 @@ float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, in
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_1darray_v4f32_f32(
@@ -575,12 +575,12 @@ float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1darray.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1darray.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_2darray_v4f32_f32(
@@ -607,12 +607,12 @@ float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_2darray_v4f32_f32(
@@ -640,12 +640,12 @@ float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, in
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_2darray_v4f32_f32(
@@ -676,12 +676,12 @@ float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2darray.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2darray.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP9]]
//
float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_1d_v4f16_f32(
@@ -706,12 +706,12 @@ float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
// CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.1d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.1d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP3]]
//
half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_1d_v4f16_f32(
@@ -737,12 +737,12 @@ half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.1d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.1d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_1d_v4f16_f32(
@@ -769,12 +769,12 @@ half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.1d.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.1d.v4f16.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_2d_v4f16_f32(
@@ -800,12 +800,12 @@ half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.2d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.2d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_2d_v4f16_f32(
@@ -832,12 +832,12 @@ half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.2d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.2d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_2d_v4f16_f32(
@@ -867,12 +867,12 @@ half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP6]], align 32
// CHECK-NEXT: [[TMP7:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP8:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.2d.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP8:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.2d.v4f16.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP7]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP8]]
//
half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_3d_v4f16_f32(
@@ -899,12 +899,12 @@ half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.3d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.3d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_3d_v4f16_f32(
@@ -932,12 +932,12 @@ half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.3d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.3d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP6]]
//
half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_3d_v4f16_f32(
@@ -970,12 +970,12 @@ half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
// CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP11:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.3d.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP11:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.3d.v4f16.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP11]]
//
half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_cube_v4f16_f32(
@@ -1002,12 +1002,12 @@ half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.cube.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.cube.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_cube_v4f16_f32(
@@ -1035,12 +1035,12 @@ half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i3
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.cube.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.cube.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP6]]
//
half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_1darray_v4f16_f32(
@@ -1066,12 +1066,12 @@ half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.1darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.1darray.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_1darray_v4f16_f32(
@@ -1098,12 +1098,12 @@ half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.1darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.1darray.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_1darray_v4f16_f32(
@@ -1131,12 +1131,12 @@ half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.1darray.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.1darray.v4f16.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP6]]
//
half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_2darray_v4f16_f32(
@@ -1163,12 +1163,12 @@ half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.2darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.lz.2darray.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_2darray_v4f16_f32(
@@ -1196,12 +1196,12 @@ half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.2darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.l.2darray.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP6]]
//
half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_2darray_v4f16_f32(
@@ -1232,12 +1232,12 @@ half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP9:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.2darray.v4f16.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP9:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.d.2darray.v4f16.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP9]]
//
half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_lz_2d_f32_f32(
@@ -1268,7 +1268,7 @@ half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int
//
float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_l_2d_f32_f32(
@@ -1300,7 +1300,7 @@ float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, _
//
float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_d_2d_f32_f32(
@@ -1335,7 +1335,7 @@ float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __
//
float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_lz_2darray_f32_f32(
@@ -1367,7 +1367,7 @@ float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __
//
float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_l_2darray_f32_f32(
@@ -1400,7 +1400,7 @@ float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i
//
float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_d_2darray_f32_f32(
@@ -1436,5 +1436,5 @@ float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i3
//
float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
diff --git a/clang/test/CodeGen/builtins-image-load.c b/clang/test/CodeGen/builtins-image-load.c
index d2337eab7dbc8..04d4610ccb7a7 100644
--- a/clang/test/CodeGen/builtins-image-load.c
+++ b/clang/test/CodeGen/builtins-image-load.c
@@ -22,12 +22,12 @@ typedef half half4 __attribute__((ext_vector_type(4)));
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.image.load.2d.f32.i32.v8i32(i32 12, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
+// CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.image.load.2d.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
// CHECK-NEXT: ret float [[TMP3]]
//
float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(12, i32, i32, tex, 106, 103);
+ return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, 103);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_2d_1(
@@ -46,12 +46,12 @@ float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.load.2d.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP3]]
//
float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(100, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_2d_2(
@@ -70,12 +70,12 @@ float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t te
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.load.2d.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.load.2d.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP3]]
//
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_image_load_2darray(
@@ -95,12 +95,12 @@ half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.2darray.f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.2darray.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP4]]
//
float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2darray_f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_2darray_1(
@@ -120,12 +120,12 @@ float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.2darray.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.2darray.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2darray_v4f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_2darray_2(
@@ -145,12 +145,12 @@ float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.2darray.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.2darray.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f16_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_1d_1(
@@ -168,12 +168,12 @@ half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.amdgcn.image.load.1d.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP2]]
//
float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f32_i32(100, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_1d_2(
@@ -191,12 +191,12 @@ float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t te
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
-// CHECK-NEXT: [[TMP2:%.*]] = call <4 x half> @llvm.amdgcn.image.load.1d.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP2:%.*]] = call <4 x half> @llvm.amdgcn.image.load.1d.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP2]]
//
half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f16_i32(100, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_1darray_1(
@@ -215,12 +215,12 @@ half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.load.1darray.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.load.1darray.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP3]]
//
float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f32_i32(100, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1darray_v4f32_i32(i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_1darray_2(
@@ -239,12 +239,12 @@ float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.load.1darray.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.load.1darray.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP3]]
//
half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f16_i32(100, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_3d_1(
@@ -264,12 +264,12 @@ half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.3d.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_3d_v4f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_3d_2(
@@ -289,12 +289,12 @@ float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t te
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.3d.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.3d.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f16_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_cube_1(
@@ -314,12 +314,12 @@ half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.cube.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.cube.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_cube_2(
@@ -339,12 +339,12 @@ float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.cube.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.cube.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f16_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_1d_1(
@@ -363,12 +363,12 @@ half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t te
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.1d.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP3]]
//
float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(100, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_1d_2(
@@ -387,12 +387,12 @@ float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.1d.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.1d.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP3]]
//
half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(100, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_1darray_1(
@@ -412,12 +412,12 @@ half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.1darray.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_1darray_2(
@@ -437,12 +437,12 @@ float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_tex
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.1darray.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.1darray.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_image_load_mip_2d(
@@ -462,12 +462,12 @@ half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_textu
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.mip.2d.f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.mip.2d.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP4]]
//
float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_2d_1(
@@ -487,12 +487,12 @@ float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.2d.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_2d_2(
@@ -512,12 +512,12 @@ float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.2d.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.2d.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(100, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_image_load_mip_2darray(
@@ -538,12 +538,12 @@ half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.load.mip.2darray.f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.load.mip.2darray.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP5]]
//
float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_f32_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_2darray_1(
@@ -564,12 +564,12 @@ float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.2darray.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_2darray_2(
@@ -590,12 +590,12 @@ float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_tex
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.2darray.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.2darray.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_3d_1(
@@ -616,12 +616,12 @@ half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_textu
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.3d.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_3d_2(
@@ -642,12 +642,12 @@ float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.3d.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.3d.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_cube_1(
@@ -668,12 +668,12 @@ half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.load.mip.cube.v4f32.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_cube_2(
@@ -694,12 +694,12 @@ float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_textur
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.cube.v4f16.i32.v8i32(i32 100, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.load.mip.cube.v4f16.i32.v8i32(i32 15, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(100, i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_1d_v4f32_f32(
@@ -724,11 +724,11 @@ half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
// CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP3]]
//
float4 test_builtin_amdgcn_image_sample_1d_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f32_f32(100, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1d_v4f32_f32(f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_1d_v4f16_f32(
@@ -753,11 +753,11 @@ float4 test_builtin_amdgcn_image_sample_1d_v4f32_f32(float4 v4f32, int i32, floa
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP1]], align 32
// CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.1d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP3:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.1d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP2]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP3]]
//
half4 test_builtin_amdgcn_image_sample_1d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_1darray_v4f32_f32(
@@ -780,11 +780,11 @@ half4 test_builtin_amdgcn_image_sample_1d_v4f16_f32(half4 v4f16, int i32, float
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.1darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_amdgcn_image_sample_1darray_v4f32_f32(int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_1darray_v4f16_f32(
@@ -810,11 +810,11 @@ float4 test_builtin_amdgcn_image_sample_1darray_v4f32_f32(int i32, float f32, __
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.1darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.1darray.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_amdgcn_image_sample_1darray_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_amdgcn_image_sample_2d_f32_f32(
@@ -837,11 +837,11 @@ half4 test_builtin_amdgcn_image_sample_1darray_v4f16_f32(half4 v4f16, int i32, f
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.sample.2d.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.sample.2d.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP4]]
//
float test_builtin_amdgcn_image_sample_2d_f32_f32(int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_2d_v4f32_f32(
@@ -867,11 +867,11 @@ float test_builtin_amdgcn_image_sample_2d_f32_f32(int i32, float f32, __amdgpu_t
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_amdgcn_image_sample_2d_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_2d_v4f16_f32(
@@ -897,11 +897,11 @@ float4 test_builtin_amdgcn_image_sample_2d_v4f32_f32(float4 v4f32, int i32, floa
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.2d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.2d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_amdgcn_image_sample_2d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_amdgcn_image_sample_2darray_f32_f32(
@@ -925,11 +925,11 @@ half4 test_builtin_amdgcn_image_sample_2d_v4f16_f32(half4 v4f16, int i32, float
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.sample.2darray.f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.sample.2darray.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP5]]
//
float test_builtin_amdgcn_image_sample_2darray_f32_f32(int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_2darray_v4f32_f32(
@@ -956,11 +956,11 @@ float test_builtin_amdgcn_image_sample_2darray_f32_f32(int i32, float f32, __amd
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.2darray.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.2darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_amdgcn_image_sample_2darray_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_2darray_v4f16_f32(
@@ -987,11 +987,11 @@ float4 test_builtin_amdgcn_image_sample_2darray_v4f32_f32(float4 v4f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.2darray.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.2darray.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_amdgcn_image_sample_2darray_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_3d_v4f32_f32(
@@ -1018,11 +1018,11 @@ half4 test_builtin_amdgcn_image_sample_2darray_v4f16_f32(half4 v4f16, int i32, f
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.3d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_amdgcn_image_sample_3d_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_3d_v4f16_f32(
@@ -1049,11 +1049,11 @@ float4 test_builtin_amdgcn_image_sample_3d_v4f32_f32(float4 v4f32, int i32, floa
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.3d.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.3d.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_amdgcn_image_sample_3d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_cube_v4f32_f32(
@@ -1080,11 +1080,11 @@ half4 test_builtin_amdgcn_image_sample_3d_v4f16_f32(half4 v4f16, int i32, float
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.cube.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_amdgcn_image_sample_cube_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_cube_v4f16_f32(
@@ -1111,9 +1111,9 @@ float4 test_builtin_amdgcn_image_sample_cube_v4f32_f32(float4 v4f32, int i32, fl
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.cube.v4f16.f32.v8i32.v4i32(i32 100, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x half> @llvm.amdgcn.image.sample.cube.v4f16.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_amdgcn_image_sample_cube_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
diff --git a/clang/test/CodeGen/builtins-image-store.c b/clang/test/CodeGen/builtins-image-store.c
index 5309a16df7033..df3f9490230cc 100644
--- a/clang/test/CodeGen/builtins-image-store.c
+++ b/clang/test/CodeGen/builtins-image-store.c
@@ -22,12 +22,12 @@ typedef half half4 __attribute__((ext_vector_type(4)));
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.f32.i32.v8i32(float [[TMP0]], i32 12, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2d_f32_i32(f32, 12, i32, i32, tex, 106, 103);
+ __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, tex, 106, 103);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2d_1(
@@ -47,12 +47,12 @@ void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, 100, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2d_2(
@@ -72,12 +72,12 @@ void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2darray(
@@ -98,12 +98,12 @@ void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.f32.i32.v8i32(float [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2darray_f32_i32(f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2darray_f32_i32(f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2darray_1(
@@ -124,12 +124,12 @@ void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2darray_2(
@@ -150,12 +150,12 @@ void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1d_1(
@@ -174,12 +174,12 @@ void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.1d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.1d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, 100, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1d_2(
@@ -198,12 +198,12 @@ void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.1d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.1d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 100, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1darray_1(
@@ -223,12 +223,12 @@ void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.1darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.1darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, 100, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1darray_2(
@@ -248,12 +248,12 @@ void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.1darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.1darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 100, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_3d_1(
@@ -274,12 +274,12 @@ void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.3d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.3d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_3d_2(
@@ -300,12 +300,12 @@ void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.3d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.3d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_cube_1(
@@ -326,12 +326,12 @@ void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.cube.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.cube.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_cube_2(
@@ -352,12 +352,12 @@ void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.cube.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.cube.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1d_1(
@@ -377,12 +377,12 @@ void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t te
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, 100, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1d_2(
@@ -402,12 +402,12 @@ void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1darray_1(
@@ -428,12 +428,12 @@ void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1darray_2(
@@ -454,12 +454,12 @@ void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_text
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.1darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2d(
@@ -480,12 +480,12 @@ void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_textu
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.f32.i32.v8i32(float [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2d_1(
@@ -506,12 +506,12 @@ void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2d_2(
@@ -532,12 +532,12 @@ void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2darray(
@@ -559,12 +559,12 @@ void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.f32.i32.v8i32(float [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2darray_1(
@@ -586,12 +586,12 @@ void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2darray_2(
@@ -613,12 +613,12 @@ void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_text
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_3d_1(
@@ -640,12 +640,12 @@ void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_textu
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.3d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.3d.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_3d_2(
@@ -667,12 +667,12 @@ void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.3d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.3d.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_cube_1(
@@ -694,12 +694,12 @@ void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.cube.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.cube.v4f32.i32.v8i32(<4 x float> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_cube_2(
@@ -721,10 +721,10 @@ void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.cube.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 100, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.cube.v4f16.i32.v8i32(<4 x half> [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110);
}
diff --git a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
index 47dbdd4e51782..37a9ecb5733da 100644
--- a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
+++ b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
@@ -27,201 +27,216 @@ float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int
return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(8, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
}
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_err_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(-1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument value -1 is outside the valid range [1, 15]}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_err_2(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(31, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument value 31 is outside the valid range [1, 15]}}
+}
+
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_err_3(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(i32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f32_f32' must be a constant integer}}
}
float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f32_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(23, f32, tex, vec4i32, 0, i32, 11); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(f32, tex, vec4i32, 0, i32, 11); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(i32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f16_f32' must be a constant integer}}
}
half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f16_f32' must be a constant integer}}
}
float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_f32_f32' must be a constant integer}}
}
float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_f32_f32' must be a constant integer}}
}
float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_f32_f32' must be a constant integer}}
}
float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_f32_f32' must be a constant integer}}
}
float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_f32_f32' must be a constant integer}}
}
float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_f32_f32' must be a constant integer}}
}
diff --git a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
index e60f8c70dc7c4..924d2d9b41e06 100644
--- a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
+++ b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
@@ -29,199 +29,199 @@ float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int
float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(105, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(10, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(105, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f32_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(105, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(105, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(105, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(105, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f16_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_f32_f32' needs target feature extended-image-insts}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl
index 8f609dcbd34f2..db4af58400840 100644
--- a/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl
@@ -7,188 +7,188 @@ typedef half half4 __attribute__((ext_vector_type(4)));
float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, i32, tex, 106, 103); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_f32_i32' must be a constant integer}}
}
float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(100, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f16_i32' must be a constant integer}}
}
float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_f32_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2darray_f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_f32_i32' must be a constant integer}}
}
float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f32_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f16_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f16_i32(100, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f32_i32(100, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1darray_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f16_i32(100, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f32_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_3d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(100, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f16_i32' must be a constant integer}}
}
float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_f32_i32' must be a constant integer}}
}
float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f16_i32' must be a constant integer}}
}
float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_f32_i32' must be a constant integer}}
}
float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f16_i32' must be a constant integer}}
}
float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f16_i32' must be a constant integer}}
}
float test_builtin_image_sample_2d(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(i32, f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 106, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_f32_f32' must be a constant integer}}
}
float4 test_builtin_image_sample_2d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_2d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f16_f32' must be a constant integer}}
}
float test_builtin_image_sample_2darray(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_f32_f32(100, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_f32_f32' must be a constant integer}}
}
float4 test_builtin_image_sample_2darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_2darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f16_f32' must be a constant integer}}
}
float4 test_builtin_image_sample_1d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1d_v4f32_f32(f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_1d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f16_f32' must be a constant integer}}
}
float4 test_builtin_image_sample_1darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f32_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_1darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f16_f32' must be a constant integer}}
}
float4 test_builtin_image_sample_3d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_3d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f16_f32' must be a constant integer}}
}
float4 test_builtin_image_sample_cube_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_cube_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f16_f32' must be a constant integer}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl
index b8780024f1076..a08383fcd2e97 100644
--- a/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl
@@ -7,213 +7,213 @@ typedef half half4 __attribute__((ext_vector_type(4)));
float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d' needs target feature image-insts}}
}
float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2d_2' needs target feature image-insts}}
}
float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_f32_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2darray_f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray' needs target feature image-insts}}
}
float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f32_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f16_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f16_i32(100, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f32_i32(100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1darray_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f16_i32(100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f32_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_3d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_2' needs target feature image-insts}}
}
float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2d_2' needs target feature image-insts}}
}
float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2darray' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_2' needs target feature image-insts}}
}
float test_builtin_image_load_2d_gfx(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(12, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d_gfx' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d_gfx' needs target feature image-insts}}
}
float4 test_builtin_image_load_2d_gfx_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(100, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_2d_gfx_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_2' needs target feature image-insts}}
}
float test_builtin_image_sample_2d(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(i32, f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{'test_builtin_image_sample_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{'test_builtin_image_sample_2d' needs target feature image-insts}}
}
float4 test_builtin_image_sample_2d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2d_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_2d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2d_2' needs target feature image-insts}}
}
float test_builtin_image_sample_2darray(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_f32_f32(100, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray' needs target feature image-insts}}
}
float4 test_builtin_image_sample_2darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_2darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_1d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1d_v4f32_f32(f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_1d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_1darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f32_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_1darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_3d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_3d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_cube_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_cube_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_2' needs target feature image-insts}}
}
float test_builtin_image_sample_2d_gfx(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx' needs target feature image-insts}}
}
float4 test_builtin_image_sample_2d_gfx_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_2d_gfx_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_2' needs target feature image-insts}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl
index 4f6347e1c5286..11dcceedf471d 100644
--- a/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl
@@ -6,124 +6,124 @@ typedef half half4 __attribute__((ext_vector_type(4)));
void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, i32, tex, 106, 103); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, tex, 106, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, 100, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_f32_i32(f32, 100, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2darray_f32_i32(f32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 100, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, 100, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 100, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f16_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f16_i32' must be a constant integer}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl
index d0085e5403b5f..a9ff602fa08e7 100644
--- a/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl
@@ -6,124 +6,124 @@ typedef half half4 __attribute__((ext_vector_type(4)));
void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_store_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_store_2d' needs target feature image-insts}}
}
void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, 100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2d_1' needs target feature image-insts}}
}
void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2d_2' needs target feature image-insts}}
}
void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_f32_i32(f32, 100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2darray_f32_i32(f32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray' needs target feature image-insts}}
}
void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_1' needs target feature image-insts}}
}
void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 100, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_2' needs target feature image-insts}}
}
void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, 100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_1' needs target feature image-insts}}
}
void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_2' needs target feature image-insts}}
}
void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_1' needs target feature image-insts}}
}
void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2d_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2darray' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_2' needs target feature image-insts}}
}
diff --git a/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl b/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl
index b1920bddbf181..77d016e0e76e4 100644
--- a/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl
+++ b/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl
@@ -51,11 +51,11 @@ float16_4 test_explicit_cast_float16_4_to_half4(float16_4 f16_4) {
}
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, 110); // expected-no-error
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, 110); // expected-no-error
}
half4 test_builtin_amdgcn_image_sample_2d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); // expected-no-error
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110); // expected-no-error
}
void test_half_mismatch_vector_size_error(float16_2 f16_2, float16_3 f16_3, float16_4 f16_4, float16_8 f16_8, float16_16 f16_16) {
>From b3e0d785c856e484f98ec1f0b4bf359347ad4b73 Mon Sep 17 00:00:00 2001
From: Alexey Sachkov <alexey.sachkov at amd.com>
Date: Mon, 16 Feb 2026 11:45:12 -0600
Subject: [PATCH 2/3] Completely rework the patch
Instead of dropping the dmask argument, this change makes sure that it is
properly diagnosed to match restrictions on the corresponding LLVM IR intrinsics.
---
clang/include/clang/Basic/BuiltinsAMDGPU.td | 220 ++++++-------
.../clang/Basic/DiagnosticSemaKinds.td | 4 +
clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 36 +--
clang/lib/Sema/SemaAMDGPU.cpp | 53 +++-
clang/test/CodeGen/builtins-extended-image.c | 102 +++---
clang/test/CodeGen/builtins-image-load.c | 94 +++---
clang/test/CodeGen/builtins-image-store.c | 64 ++--
...iltins-extended-image-param-gfx1100-err.cl | 290 +++++++++++++++---
...uiltins-extended-image-param-gfx942-err.cl | 80 ++---
.../builtins-image-load-param-gfx1100-err.cl | 161 +++++++---
.../builtins-image-load-param-gfx942-err.cl | 96 +++---
.../builtins-image-store-param-gfx1100-err.cl | 104 +++++--
.../builtins-image-store-param-gfx942-err.cl | 56 ++--
.../half-float16-vector-compatibility.cl | 4 +-
14 files changed, 844 insertions(+), 520 deletions(-)
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.td b/clang/include/clang/Basic/BuiltinsAMDGPU.td
index 0556c3f3d363f..c1ca7d4fd8e77 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.td
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.td
@@ -921,114 +921,114 @@ def __builtin_amdgcn_cooperative_atomic_store_8x16B : AMDGPUBuiltin<"void(_ExtVe
//===----------------------------------------------------------------------===//
// Image builtins
//===----------------------------------------------------------------------===//
-def __builtin_amdgcn_image_load_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_load_mip_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_store_mip_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
-def __builtin_amdgcn_image_sample_lz_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_lz_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_l_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2d_f32_f32 : AMDGPUBuiltin<"float(float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2darray_f32_f32 : AMDGPUBuiltin<"float(float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
-def __builtin_amdgcn_image_sample_d_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_load_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2d_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2darray_f32_i32 : AMDGPUBuiltin<"float(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_3d_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_3d_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_cube_v4f32_i32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_load_mip_cube_v4f16_i32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_1darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2d_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2darray_f32_i32 : AMDGPUBuiltin<"void(float, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2darray_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_2darray_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_3d_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_3d_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_cube_v4f32_i32 : AMDGPUBuiltin<"void(_ExtVector<4, float>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_store_mip_cube_v4f16_i32 : AMDGPUBuiltin<"void(_ExtVector<4, _Float16>, int, int, int, int, int, __amdgpu_texture_t, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "image-insts">;
+def __builtin_amdgcn_image_sample_lz_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_lz_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_cube_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_l_cube_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_1darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2d_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2darray_f32_f32 : AMDGPUBuiltin<"float(int, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2darray_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_2darray_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_3d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
+def __builtin_amdgcn_image_sample_d_3d_v4f16_f32 : AMDGPUBuiltin<"_ExtVector<4, _Float16>(int, float, float, float, float, float, float, float, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
def __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32 : AMDGPUBuiltin<"_ExtVector<4, float>(int, float, float, __amdgpu_texture_t, _ExtVector<4, int>, bool, int, int)", [Const], "extended-image-insts">;
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index f12677ac11600..25303010d9cdf 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -13931,6 +13931,10 @@ def note_amdgcn_load_lds_size_valid_value : Note<"size must be %select{1, 2, or
def err_amdgcn_coop_atomic_invalid_as : Error<"cooperative atomic requires a global or generic pointer">;
+def err_amdgcn_dmask_has_too_many_bits_set
+ : Error<"dmask argument cannot have more bits set than there are elements "
+ "in return type">;
+
def warn_amdgpu_s_wait_event_mask_no_effect_target :
Warning<"event mask has no effect for target">,
InGroup<DiagGroup<"amdgpu-wait-event-mask">>;
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index a0a992ae9503e..bff1ed3d2ec19 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -219,8 +219,7 @@ static llvm::Value *loadTextureDescPtorAsVec8I32(CodeGenFunction &CGF,
llvm::CallInst *
emitAMDGCNImageOverloadedReturnType(clang::CodeGen::CodeGenFunction &CGF,
const clang::CallExpr *E,
- unsigned IntrinsicID, bool IsImageStore,
- bool InsertDMaskImplicitArg = true) {
+ unsigned IntrinsicID, bool IsImageStore) {
auto findTextureDescIndex = [&CGF](const CallExpr *E) -> unsigned {
QualType TexQT = CGF.getContext().AMDGPUTextureTy;
for (unsigned I = 0, N = E->getNumArgs(); I < N; ++I) {
@@ -244,37 +243,13 @@ emitAMDGCNImageOverloadedReturnType(clang::CodeGen::CodeGenFunction &CGF,
llvm::report_fatal_error("Invalid argument count for image builtin");
}
- auto handleArgument = [&](unsigned Index) -> void {
- llvm::Value *V = CGF.EmitScalarExpr(E->getArg(Index));
- if (Index == RsrcIndex)
+ for (unsigned I = 0; I < E->getNumArgs(); ++I) {
+ llvm::Value *V = CGF.EmitScalarExpr(E->getArg(I));
+ if (I == RsrcIndex)
V = loadTextureDescPtorAsVec8I32(CGF, V);
Args.push_back(V);
- };
-
- if (InsertDMaskImplicitArg) {
- llvm::Type *TyForDMask = IsImageStore
- ? CGF.ConvertType(E->getArg(0)->getType())
- : CGF.ConvertType(E->getType());
- // Implicit DMask is an all-ones mask of N bits where N is the number of
- // elements in return type.
- int DMask = 1;
- if (auto *VTy = dyn_cast<llvm::FixedVectorType>(TyForDMask))
- DMask = static_cast<int>(VTy->getNumElements());
- DMask = (1 << DMask) - 1;
-
- // Store intrinsics have output argument first and then DMask, all other
- // intrinsics have DMask as fist argument.
- if (IsImageStore)
- handleArgument(0);
- Args.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(CGF.getLLVMContext()), DMask));
}
- unsigned StartIndex =
- static_cast<int>(InsertDMaskImplicitArg && IsImageStore);
- for (unsigned I = StartIndex; I < E->getNumArgs(); ++I)
- handleArgument(I);
-
llvm::Type *RetTy = IsImageStore ? CGF.VoidTy : CGF.ConvertType(E->getType());
llvm::CallInst *Call = CGF.Builder.CreateIntrinsic(RetTy, IntrinsicID, Args);
return Call;
@@ -1264,8 +1239,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
*this, E, Intrinsic::amdgcn_image_sample_d_2darray, false);
case clang::AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32:
return emitAMDGCNImageOverloadedReturnType(
- *this, E, Intrinsic::amdgcn_image_gather4_lz_2d, false,
- /*InsertDMaskImplicitArg = */ false);
+ *this, E, Intrinsic::amdgcn_image_gather4_lz_2d, false);
case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
case AMDGPU::BI__builtin_amdgcn_mfma_scale_f32_32x32x64_f8f6f4: {
llvm::FixedVectorType *VT = FixedVectorType::get(Builder.getInt32Ty(), 8);
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index acebbf6c3aa8f..bb0da893b7198 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -249,21 +249,40 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
unsigned ArgCount = TheCall->getNumArgs() - 1;
llvm::APSInt Result;
- bool ExtraGatherChecks = false;
- if (BuiltinID == AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32) {
- // Gather has the DMask argument.
- const Type *RetTy = TheCall->getType().getTypePtr();
- constexpr int Low = 1;
- int High = 1;
- if (auto *VTy = dyn_cast<VectorType>(RetTy))
- High = VTy->getNumElements();
- // Which cannot accept values that have more bits set that there are
- // elements in the return type.
- High = (1 << High) - 1;
- ExtraGatherChecks |= SemaRef.BuiltinConstantArgRange(
- TheCall, 0, Low, High, /* RangeIsError = */ true);
+  // Complain about dmask values which are too huge to fully fit into 4 bits
+  // (which is the actual size of the dmask in corresponding HW instructions).
+ constexpr unsigned DMaskArgNo = 0;
+ constexpr int Low = 0;
+ constexpr int High = 15;
+ if (SemaRef.BuiltinConstantArg(TheCall, DMaskArgNo, Result) ||
+ SemaRef.BuiltinConstantArgRange(TheCall, DMaskArgNo, Low, High,
+ /* RangeIsError = */ true))
+ return true;
+
+ // Dmask indicates which elements should be returned and it is not possible
+ // to return more values than there are elements in return type.
+ int NumElementsInRetTy = 1;
+ const Type *RetTy = TheCall->getType().getTypePtr();
+ if (auto *VTy = dyn_cast<VectorType>(RetTy))
+ NumElementsInRetTy = VTy->getNumElements();
+  assert(
+      NumElementsInRetTy <= 4 &&
+      "DMask is a 4-bit value in underlying LLVM IR intrinsics (and HW "
+      "instructions). This assert indicates malformed built-in declaration");
+ int NumActiveBitsInDMask =
+ llvm::popcount(static_cast<uint8_t>(Result.getExtValue()));
+ if (NumActiveBitsInDMask > NumElementsInRetTy) {
+ Diag(TheCall->getBeginLoc(),
+ diag::err_amdgcn_dmask_has_too_many_bits_set);
+ return true;
}
+ bool ExtraGatherChecks = false;
+ // For gather, only one bit can be set indicating which exact component to
+ // return.
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32)
+ ExtraGatherChecks |= SemaRef.BuiltinConstantArgPower2(TheCall, 0);
+
return ExtraGatherChecks ||
(SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
(SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
@@ -308,7 +327,13 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
unsigned ArgCount = TheCall->getNumArgs() - 1;
llvm::APSInt Result;
- return (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
+ // Complain about dmask values which are too huge to fully fit into 4 bits
+ // (which is the actual size of the dmask in corresponding HW instructions).
+ constexpr unsigned DMaskArgNo = 1;
+ return (SemaRef.BuiltinConstantArgRange(TheCall, DMaskArgNo, /* Low = */ 0,
+ /* High = */ 15,
+ /* RangeIsError = */ true)) ||
+ (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
(SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
}
case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8:
diff --git a/clang/test/CodeGen/builtins-extended-image.c b/clang/test/CodeGen/builtins-extended-image.c
index 44a757b75e3a2..9ac7ec42d4e50 100644
--- a/clang/test/CodeGen/builtins-extended-image.c
+++ b/clang/test/CodeGen/builtins-extended-image.c
@@ -156,7 +156,7 @@ float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int
//
float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(15, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_1d_v4f32_f32(
@@ -187,7 +187,7 @@ float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32
//
float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_1d_v4f32_f32(
@@ -219,7 +219,7 @@ float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32,
//
float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_2d_v4f32_f32(
@@ -250,7 +250,7 @@ float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32,
//
float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_2d_v4f32_f32(
@@ -277,12 +277,12 @@ float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2d.v4f32.f32.v8i32.v4i32(i32 10, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(10, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_2d_v4f32_f32(
@@ -317,7 +317,7 @@ float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32,
//
float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(15, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_3d_v4f32_f32(
// CHECK-SAME: <4 x float> noundef [[V4F32:%.*]], float noundef [[F32:%.*]], i32 noundef [[I32:%.*]], ptr [[TEX:%.*]], <4 x i32> noundef [[VEC4I32:%.*]]) #[[ATTR0]] {
@@ -348,7 +348,7 @@ float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32,
//
float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_3d_v4f32_f32(
@@ -376,12 +376,12 @@ float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.3d.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.3d.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_3d_v4f32_f32(
@@ -414,12 +414,12 @@ float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP9]], align 32
// CHECK-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP11:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.3d.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], float [[TMP7]], float [[TMP8]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP10]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP11]]
//
float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_cube_v4f32_f32(
@@ -446,12 +446,12 @@ float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32,
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.cube.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.cube.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_cube_v4f32_f32(
@@ -479,12 +479,12 @@ float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.cube.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.cube.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_1darray_v4f32_f32(
@@ -510,12 +510,12 @@ float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i3
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
// CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.1darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP3]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_1darray_v4f32_f32(
@@ -542,12 +542,12 @@ float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, in
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.1darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_1darray_v4f32_f32(
@@ -575,12 +575,12 @@ float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1darray.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.1darray.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_lz_2darray_v4f32_f32(
@@ -607,12 +607,12 @@ float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.lz.2darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_l_2darray_v4f32_f32(
@@ -640,12 +640,12 @@ float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, in
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2darray.v4f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP6:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.l.2darray.v4f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP5]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP6]]
//
float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_amdgcn_image_sample_d_2darray_v4f32_f32(
@@ -676,12 +676,12 @@ float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
// CHECK-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2darray.v4f32.f32.f32.v8i32.v4i32(i32 15, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP9:%.*]] = call <4 x float> @llvm.amdgcn.image.sample.d.2darray.v4f32.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], float [[TMP3]], float [[TMP4]], float [[TMP5]], float [[TMP6]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP8]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret <4 x float> [[TMP9]]
//
float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_1d_v4f16_f32(
@@ -711,7 +711,7 @@ float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int
//
half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(15, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_1d_v4f16_f32(
@@ -742,7 +742,7 @@ half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_1d_v4f16_f32(
@@ -774,7 +774,7 @@ half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_2d_v4f16_f32(
@@ -805,7 +805,7 @@ half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_2d_v4f16_f32(
@@ -837,7 +837,7 @@ half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_2d_v4f16_f32(
@@ -872,7 +872,7 @@ half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(15, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_3d_v4f16_f32(
@@ -904,7 +904,7 @@ half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_3d_v4f16_f32(
@@ -937,7 +937,7 @@ half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_3d_v4f16_f32(
@@ -975,7 +975,7 @@ half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(15, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_cube_v4f16_f32(
@@ -1007,7 +1007,7 @@ half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32,
//
half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_cube_v4f16_f32(
@@ -1040,7 +1040,7 @@ half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i3
//
half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_1darray_v4f16_f32(
@@ -1071,7 +1071,7 @@ half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32
//
half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_1darray_v4f16_f32(
@@ -1103,7 +1103,7 @@ half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int
//
half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_1darray_v4f16_f32(
@@ -1136,7 +1136,7 @@ half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int
//
half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_lz_2darray_v4f16_f32(
@@ -1168,7 +1168,7 @@ half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int
//
half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_l_2darray_v4f16_f32(
@@ -1201,7 +1201,7 @@ half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int
//
half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_amdgcn_image_sample_d_2darray_v4f16_f32(
@@ -1237,7 +1237,7 @@ half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int
//
half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(15, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_lz_2d_f32_f32(
@@ -1268,7 +1268,7 @@ half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int
//
float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_l_2d_f32_f32(
@@ -1300,7 +1300,7 @@ float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, _
//
float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_d_2d_f32_f32(
@@ -1335,7 +1335,7 @@ float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __
//
float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_lz_2darray_f32_f32(
@@ -1367,7 +1367,7 @@ float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __
//
float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_l_2darray_f32_f32(
@@ -1400,7 +1400,7 @@ float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i
//
float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_amdgcn_image_sample_d_2darray_f32_f32(
@@ -1436,5 +1436,5 @@ float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i3
//
float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
diff --git a/clang/test/CodeGen/builtins-image-load.c b/clang/test/CodeGen/builtins-image-load.c
index 04d4610ccb7a7..a6b8458a57d9e 100644
--- a/clang/test/CodeGen/builtins-image-load.c
+++ b/clang/test/CodeGen/builtins-image-load.c
@@ -22,12 +22,12 @@ typedef half half4 __attribute__((ext_vector_type(4)));
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP2]], align 32
-// CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.image.load.2d.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
+// CHECK-NEXT: [[TMP3:%.*]] = call float @llvm.amdgcn.image.load.2d.f32.i32.v8i32(i32 2, i32 [[TMP0]], i32 [[TMP1]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
// CHECK-NEXT: ret float [[TMP3]]
//
float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, 103);
+ return __builtin_amdgcn_image_load_2d_f32_i32(2, i32, i32, tex, 106, 103);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_2d_1(
@@ -51,7 +51,7 @@ float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
//
float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_2d_2(
@@ -75,7 +75,7 @@ float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t te
//
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_image_load_2darray(
@@ -95,12 +95,12 @@ half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.2darray.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.2darray.f32.i32.v8i32(i32 4, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP4]]
//
float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2darray_f32_i32(4, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_2darray_1(
@@ -125,7 +125,7 @@ float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex
//
float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2darray_v4f32_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_2darray_2(
@@ -150,7 +150,7 @@ float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture
//
half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f16_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_1d_1(
@@ -173,7 +173,7 @@ half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t
//
float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1d_v4f32_i32(15, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_1d_2(
@@ -196,7 +196,7 @@ float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t te
//
half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f16_i32(i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(15, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_1darray_1(
@@ -220,7 +220,7 @@ half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
//
float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f32_i32(i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1darray_v4f32_i32(15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_1darray_2(
@@ -244,7 +244,7 @@ float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture
//
half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f16_i32(i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_3d_1(
@@ -269,7 +269,7 @@ half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t
//
float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_3d_v4f32_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_3d_2(
@@ -294,7 +294,7 @@ float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t te
//
half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_cube_1(
@@ -319,7 +319,7 @@ half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
//
float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_cube_v4f32_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_cube_2(
@@ -344,7 +344,7 @@ float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t
//
half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_1d_1(
@@ -368,7 +368,7 @@ half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t te
//
float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_1d_2(
@@ -392,7 +392,7 @@ float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_
//
half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_1darray_1(
@@ -417,7 +417,7 @@ half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t
//
float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_1darray_2(
@@ -442,7 +442,7 @@ float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_tex
//
half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_image_load_mip_2d(
@@ -462,12 +462,12 @@ half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_textu
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.mip.2d.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP4:%.*]] = call float @llvm.amdgcn.image.load.mip.2d.f32.i32.v8i32(i32 8, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP4]]
//
float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2d_f32_i32(8, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_2d_1(
@@ -492,7 +492,7 @@ float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex)
//
float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_2d_2(
@@ -517,7 +517,7 @@ float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_
//
half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_image_load_mip_2darray(
@@ -538,12 +538,12 @@ half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.load.mip.2darray.f32.i32.v8i32(i32 1, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.load.mip.2darray.f32.i32.v8i32(i32 8, i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP5]]
//
float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2darray_f32_i32(8, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_2darray_1(
@@ -569,7 +569,7 @@ float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t
//
float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_2darray_2(
@@ -595,7 +595,7 @@ float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_tex
//
half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_3d_1(
@@ -621,7 +621,7 @@ half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_textu
//
float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_3d_2(
@@ -647,7 +647,7 @@ float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_
//
half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_image_load_mip_cube_1(
@@ -673,7 +673,7 @@ half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t
//
float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_image_load_mip_cube_2(
@@ -699,7 +699,7 @@ float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_textur
//
half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, 110);
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_1d_v4f32_f32(
@@ -728,7 +728,7 @@ half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_
// CHECK-NEXT: ret <4 x float> [[TMP3]]
//
float4 test_builtin_amdgcn_image_sample_1d_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f32_f32(f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1d_v4f32_f32(15, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_1d_v4f16_f32(
@@ -757,7 +757,7 @@ float4 test_builtin_amdgcn_image_sample_1d_v4f32_f32(float4 v4f32, int i32, floa
// CHECK-NEXT: ret <4 x half> [[TMP3]]
//
half4 test_builtin_amdgcn_image_sample_1d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(15, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_1darray_v4f32_f32(
@@ -784,7 +784,7 @@ half4 test_builtin_amdgcn_image_sample_1d_v4f16_f32(half4 v4f16, int i32, float
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_amdgcn_image_sample_1darray_v4f32_f32(int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1darray_v4f32_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_1darray_v4f16_f32(
@@ -814,7 +814,7 @@ float4 test_builtin_amdgcn_image_sample_1darray_v4f32_f32(int i32, float f32, __
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_amdgcn_image_sample_1darray_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_amdgcn_image_sample_2d_f32_f32(
@@ -841,7 +841,7 @@ half4 test_builtin_amdgcn_image_sample_1darray_v4f16_f32(half4 v4f16, int i32, f
// CHECK-NEXT: ret float [[TMP4]]
//
float test_builtin_amdgcn_image_sample_2d_f32_f32(int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_2d_v4f32_f32(
@@ -871,7 +871,7 @@ float test_builtin_amdgcn_image_sample_2d_f32_f32(int i32, float f32, __amdgpu_t
// CHECK-NEXT: ret <4 x float> [[TMP4]]
//
float4 test_builtin_amdgcn_image_sample_2d_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_2d_v4f16_f32(
@@ -901,7 +901,7 @@ float4 test_builtin_amdgcn_image_sample_2d_v4f32_f32(float4 v4f32, int i32, floa
// CHECK-NEXT: ret <4 x half> [[TMP4]]
//
half4 test_builtin_amdgcn_image_sample_2d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local float @test_builtin_amdgcn_image_sample_2darray_f32_f32(
@@ -925,11 +925,11 @@ half4 test_builtin_amdgcn_image_sample_2d_v4f16_f32(half4 v4f16, int i32, float
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
// CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr [[VEC4I32_ADDR_ASCAST]], align 16
-// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.sample.2darray.f32.f32.v8i32.v4i32(i32 1, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
+// CHECK-NEXT: [[TMP5:%.*]] = call float @llvm.amdgcn.image.sample.2darray.f32.f32.v8i32.v4i32(i32 4, float [[TMP0]], float [[TMP1]], float [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], <4 x i32> [[TMP4]], i1 false, i32 120, i32 110)
// CHECK-NEXT: ret float [[TMP5]]
//
float test_builtin_amdgcn_image_sample_2darray_f32_f32(int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2darray_f32_f32(4, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_2darray_v4f32_f32(
@@ -960,7 +960,7 @@ float test_builtin_amdgcn_image_sample_2darray_f32_f32(int i32, float f32, __amd
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_amdgcn_image_sample_2darray_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2darray_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_2darray_v4f16_f32(
@@ -991,7 +991,7 @@ float4 test_builtin_amdgcn_image_sample_2darray_v4f32_f32(float4 v4f32, int i32,
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_amdgcn_image_sample_2darray_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_3d_v4f32_f32(
@@ -1022,7 +1022,7 @@ half4 test_builtin_amdgcn_image_sample_2darray_v4f16_f32(half4 v4f16, int i32, f
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_amdgcn_image_sample_3d_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_3d_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_3d_v4f16_f32(
@@ -1053,7 +1053,7 @@ float4 test_builtin_amdgcn_image_sample_3d_v4f32_f32(float4 v4f32, int i32, floa
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_amdgcn_image_sample_3d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x float> @test_builtin_amdgcn_image_sample_cube_v4f32_f32(
@@ -1084,7 +1084,7 @@ half4 test_builtin_amdgcn_image_sample_3d_v4f16_f32(half4 v4f16, int i32, float
// CHECK-NEXT: ret <4 x float> [[TMP5]]
//
float4 test_builtin_amdgcn_image_sample_cube_v4f32_f32(float4 v4f32, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_cube_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
// CHECK-LABEL: define dso_local <4 x half> @test_builtin_amdgcn_image_sample_cube_v4f16_f32(
@@ -1115,5 +1115,5 @@ float4 test_builtin_amdgcn_image_sample_cube_v4f32_f32(float4 v4f32, int i32, fl
// CHECK-NEXT: ret <4 x half> [[TMP5]]
//
half4 test_builtin_amdgcn_image_sample_cube_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110);
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, 110);
}
diff --git a/clang/test/CodeGen/builtins-image-store.c b/clang/test/CodeGen/builtins-image-store.c
index df3f9490230cc..9a5620209b104 100644
--- a/clang/test/CodeGen/builtins-image-store.c
+++ b/clang/test/CodeGen/builtins-image-store.c
@@ -22,12 +22,12 @@ typedef half half4 __attribute__((ext_vector_type(4)));
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP3]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2d.f32.i32.v8i32(float [[TMP0]], i32 12, i32 [[TMP1]], i32 [[TMP2]], <8 x i32> [[TEX_RSRC_VAL]], i32 106, i32 103)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, tex, 106, 103);
+ __builtin_amdgcn_image_store_2d_f32_i32(f32, 12, i32, i32, tex, 106, 103);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2d_1(
@@ -52,7 +52,7 @@ void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
//
void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, 15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2d_2(
@@ -77,7 +77,7 @@ void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex
//
void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2darray(
@@ -98,12 +98,12 @@ void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.2darray.f32.i32.v8i32(float [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2darray_f32_i32(f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2darray_f32_i32(f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2darray_1(
@@ -129,7 +129,7 @@ void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex
//
void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_2darray_2(
@@ -155,7 +155,7 @@ void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_
//
void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1d_1(
@@ -179,7 +179,7 @@ void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, 15, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1d_2(
@@ -203,7 +203,7 @@ void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex
//
void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 15, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1darray_1(
@@ -228,7 +228,7 @@ void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
//
void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, 15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_1darray_2(
@@ -253,7 +253,7 @@ void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_
//
void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_3d_1(
@@ -279,7 +279,7 @@ void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_3d_2(
@@ -305,7 +305,7 @@ void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex
//
void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_cube_1(
@@ -331,7 +331,7 @@ void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex)
//
void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_cube_2(
@@ -357,7 +357,7 @@ void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t t
//
void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1d_1(
@@ -382,7 +382,7 @@ void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t te
//
void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, 15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1d_2(
@@ -407,7 +407,7 @@ void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 15, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1darray_1(
@@ -433,7 +433,7 @@ void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_1darray_2(
@@ -459,7 +459,7 @@ void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_text
//
void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2d(
@@ -480,12 +480,12 @@ void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_textu
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2d.f32.i32.v8i32(float [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2d_1(
@@ -511,7 +511,7 @@ void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex)
//
void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2d_2(
@@ -537,7 +537,7 @@ void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, 15, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2darray(
@@ -559,12 +559,12 @@ void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[I32_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TEX_ADDR_ASCAST]], align 32
// CHECK-NEXT: [[TEX_RSRC_VAL:%.*]] = load <8 x i32>, ptr [[TMP5]], align 32
-// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.f32.i32.v8i32(float [[TMP0]], i32 1, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
+// CHECK-NEXT: call void @llvm.amdgcn.image.store.mip.2darray.f32.i32.v8i32(float [[TMP0]], i32 15, i32 [[TMP1]], i32 [[TMP2]], i32 [[TMP3]], i32 [[TMP4]], <8 x i32> [[TEX_RSRC_VAL]], i32 120, i32 110)
// CHECK-NEXT: ret void
//
void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, 15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2darray_1(
@@ -591,7 +591,7 @@ void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, 15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_2darray_2(
@@ -618,7 +618,7 @@ void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_text
//
void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_3d_1(
@@ -645,7 +645,7 @@ void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_textu
//
void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, 15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_3d_2(
@@ -672,7 +672,7 @@ void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, 15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_cube_1(
@@ -699,7 +699,7 @@ void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t
//
void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, 15, i32, i32, i32, i32, tex, 120, 110);
}
// CHECK-LABEL: define dso_local void @test_builtin_image_store_mip_cube_2(
@@ -726,5 +726,5 @@ void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture
//
void test_builtin_image_store_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110);
+ __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 15, i32, i32, i32, i32, tex, 120, 110);
}
diff --git a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
index 37a9ecb5733da..cd3976bf874ec 100644
--- a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
+++ b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx1100-err.cl
@@ -27,216 +27,412 @@ float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int
return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(8, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
}
-float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_err_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_dmask_power_of_2(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(-1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument value -1 is outside the valid range [1, 15]}}
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(3, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument should be a power of 2}}
}
-float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_err_2(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(31, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument value 31 is outside the valid range [1, 15]}}
+ return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(16, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 16 is outside the valid range [0, 15]}}
}
-float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_err_3(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_gather4_lz_2d_v4f32_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f32_f32' must be a constant integer}}
}
-float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+float4 test_amdgcn_image_sample_lz_1d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(100, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(15, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_1d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_1d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(15, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_2d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, f32, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_2d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 103); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(i32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_2d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
+
float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_3d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_3d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_3d_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 100); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_cube_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_cube_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_1darray_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_1darray_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_l_2darray_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f32_f32' must be a constant integer}}
+}
+
+float4 test_amdgcn_image_sample_d_2darray_v4f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(f32, tex, vec4i32, 0, i32, 11); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(15, f32, tex, vec4i32, 0, i32, 11); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_1d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(23, f32, tex, vec4i32, 0, 12, 11); //expected-error{{argument value 23 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_1d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_1d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(15, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_2d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_2d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(15, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_2d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_3d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_3d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_3d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_3d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(15, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_3d_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_3d_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_cube_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_cube_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(i32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_cube_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_cube_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(i32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_1darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_1darray_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_1darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_1darray_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(15, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_l_2darray_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(15, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_v4f16_f32' must be a constant integer}}
+}
+
+half4 test_amdgcn_image_sample_d_2darray_v4f16_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_f32_f32(f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2d_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_lz_2d_f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2d_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_l_2d_f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2d_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_d_2d_f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_lz_2darray_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_lz_2darray_f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_l_2darray_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_l_2darray_f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, f32, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_d_2darray_f32_f32' must be a constant integer}}
+}
+
+float test_amdgcn_image_sample_d_2darray_f32_f32_dmask_range(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
diff --git a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
index 924d2d9b41e06..e60f8c70dc7c4 100644
--- a/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
+++ b/clang/test/SemaOpenCL/builtins-extended-image-param-gfx942-err.cl
@@ -29,199 +29,199 @@ float4 test_amdgcn_image_gather4_lz_2d_v4f32_f32_a(float4 v4f32, float f32, int
float4 test_amdgcn_image_sample_lz_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f32_f32(105, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_1d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f32_f32(10, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_2d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f32_f32(105, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f32_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_3d_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_cube_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_1darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_lz_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_l_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f32_f32' needs target feature extended-image-insts}}
}
float4 test_amdgcn_image_sample_d_2darray_v4f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f32_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1d_v4f16_f32(105, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1d_v4f16_f32(105, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_1d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1d_v4f16_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_2d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_3d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_3d_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_3d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_3d_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_3d_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_3d_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_cube_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_cube_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_cube_v4f16_f32(105, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_cube_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_1darray_v4f16_f32(105, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_1darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_1darray_v4f16_f32(105, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_1darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_1darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_1darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_1darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_lz_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_l_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2darray_v4f16_f32(100, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_v4f16_f32' needs target feature extended-image-insts}}
}
half4 test_amdgcn_image_sample_d_2darray_v4f16_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f16_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2darray_v4f16_f32(100, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_v4f16_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_lz_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2d_f32_f32(f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2d_f32_f32(1, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2d_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_l_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2d_f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2d_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2d_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_d_2d_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2d_f32_f32(f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2d_f32_f32(1, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2d_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_lz_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_lz_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_lz_2darray_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_l_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_l_2darray_f32_f32(f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_l_2darray_f32_f32(1, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_l_2darray_f32_f32' needs target feature extended-image-insts}}
}
float test_amdgcn_image_sample_d_2darray_f32_f32(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_d_2darray_f32_f32(f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_f32_f32' needs target feature extended-image-insts}}
+ return __builtin_amdgcn_image_sample_d_2darray_f32_f32(1, f32, f32, f32, f32, f32, f32, f32, tex, vec4i32, 0, 101, 121); //GFX94-error{{'test_amdgcn_image_sample_d_2darray_f32_f32' needs target feature extended-image-insts}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl
index db4af58400840..ae0224b6d0b54 100644
--- a/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-load-param-gfx1100-err.cl
@@ -7,188 +7,265 @@ typedef half half4 __attribute__((ext_vector_type(4)));
float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_f32_i32' must be a constant integer}}
+ float a = __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, i32, tex, 106, 103); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_f32_i32' must be a constant integer}}
+ return a + __builtin_amdgcn_image_load_2d_f32_i32(5, i32, i32, tex, 106, 103); //expected-error{{dmask argument cannot have more bits set than there are elements in return type}}
}
float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(15, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(15, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2d_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_2d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_f32_i32' must be a constant integer}}
+ float a = __builtin_amdgcn_image_load_2darray_f32_i32(1, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_f32_i32' must be a constant integer}}
+ return a + __builtin_amdgcn_image_load_2darray_f32_i32(3, i32, i32, i32, tex, i32, 110); //expected-error{{dmask argument cannot have more bits set than there are elements in return type}}
}
float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2darray_v4f32_i32(15, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(15, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_2darray_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_2darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f16_i32(i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(15, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_1d_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_1d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(100, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1darray_v4f32_i32(15, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f16_i32(i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(15, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_1darray_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_1darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(100, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_3d_v4f32_i32(15, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_3d_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_3d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_cube_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_cube_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(15, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1d_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_mip_1d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(100, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(15, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_1darray_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_mip_1darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_f32_i32' must be a constant integer}}
+ float a = __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_f32_i32' must be a constant integer}}
+ return a + __builtin_amdgcn_image_load_mip_2d_f32_i32(7, i32, i32, i32, tex, 120, 110); //expected-error{{dmask argument cannot have more bits set than there are elements in return type}}
}
float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(15, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2d_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_mip_2d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_f32_i32' must be a constant integer}}
+ float a = __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_f32_i32' must be a constant integer}}
+ return a + __builtin_amdgcn_image_load_mip_2darray_f32_i32(9, i32, i32, i32, i32, tex, 120, 110); //expected-error{{dmask argument cannot have more bits set than there are elements in return type}}
}
float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(15, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(15, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_2darray_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_mip_2darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(100, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_3d_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_mip_3d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(100, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f32_i32' must be a constant integer}}
}
half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(15, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_load_mip_cube_v4f16_i32' must be a constant integer}}
+}
+half4 test_builtin_image_load_mip_cube_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(100, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_builtin_image_sample_2d(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 106, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_f32_f32' must be a constant integer}}
+ float a = __builtin_amdgcn_image_sample_2d_f32_f32(i32, f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_f32_f32' must be a constant integer}}
+ return a + __builtin_amdgcn_image_sample_2d_f32_f32(15, f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{dmask argument cannot have more bits set than there are elements in return type}}
}
float4 test_builtin_image_sample_2d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(15, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_2d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2d_v4f16_f32' must be a constant integer}}
+}
+half4 test_builtin_image_sample_2d_dmask_range(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float test_builtin_image_sample_2darray(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_f32_f32' must be a constant integer}}
+ float a = __builtin_amdgcn_image_sample_2darray_f32_f32(1, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_f32_f32' must be a constant integer}}
+ return a + __builtin_amdgcn_image_sample_2darray_f32_f32(11, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{dmask argument cannot have more bits set than there are elements in return type}}
}
float4 test_builtin_image_sample_2darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2darray_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_2darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_2darray_v4f16_f32' must be a constant integer}}
+}
+half4 test_builtin_image_sample_2darray_dmask_range(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_sample_1d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f32_f32(f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_1d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(15, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1d_v4f16_f32' must be a constant integer}}
+}
+half4 test_builtin_image_sample_1d_dmask_range(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_sample_1darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1darray_v4f32_f32(15, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_1darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(15, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_1darray_v4f16_f32' must be a constant integer}}
+}
+half4 test_builtin_image_sample_1darray_dmask_range(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_sample_3d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_3d_v4f32_f32(15, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_3d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_3d_v4f16_f32' must be a constant integer}}
+}
+half4 test_builtin_image_sample_3d_dmask_range(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
float4 test_builtin_image_sample_cube_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f32_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_cube_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f32_f32' must be a constant integer}}
}
half4 test_builtin_image_sample_cube_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f16_f32' must be a constant integer}}
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_sample_cube_v4f16_f32' must be a constant integer}}
}
+half4 test_builtin_image_sample_cube_dmask_range(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
+}
diff --git a/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl
index a08383fcd2e97..b8780024f1076 100644
--- a/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-load-param-gfx942-err.cl
@@ -7,213 +7,213 @@ typedef half half4 __attribute__((ext_vector_type(4)));
float test_builtin_image_load_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d' needs target feature image-insts}}
}
float4 test_builtin_image_load_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2d_2' needs target feature image-insts}}
}
float test_builtin_image_load_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2darray_f32_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray' needs target feature image-insts}}
}
float4 test_builtin_image_load_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2darray_v4f32_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_2darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2darray_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2darray_v4f16_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_2darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1d_v4f16_i32(i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1d_v4f16_i32(100, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_1d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f32_i32(i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1darray_v4f32_i32(100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_1darray_v4f16_i32(i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_1darray_v4f16_i32(100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_1darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_3d_v4f32_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_3d_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_3d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_cube_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1d_v4f16_i32(100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_1d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_1darray_v4f16_i32(100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_1darray_2' needs target feature image-insts}}
}
float test_builtin_image_load_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2d_f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f32_i32(100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2d_v4f16_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2d_2' needs target feature image-insts}}
}
float test_builtin_image_load_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2darray_f32_i32(i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_mip_2darray' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f32_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_2darray_v4f16_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_2darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f32_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_3d_v4f16_i32(i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_load_mip_3d_2' needs target feature image-insts}}
}
float4 test_builtin_image_load_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f32_i32(i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_mip_cube_v4f16_i32(100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_load_mip_cube_2' needs target feature image-insts}}
}
float test_builtin_image_load_2d_gfx(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_f32_i32(i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d_gfx' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_f32_i32(12, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_load_2d_gfx' needs target feature image-insts}}
}
float4 test_builtin_image_load_2d_gfx_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f32_i32(i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f32_i32(100, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_1' needs target feature image-insts}}
}
half4 test_builtin_image_load_2d_gfx_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(100, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_load_2d_gfx_2' needs target feature image-insts}}
}
float test_builtin_image_sample_2d(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{'test_builtin_image_sample_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_f32_f32(i32, f32, f32, tex, vec4i32, 0, 106, 103); //expected-error{{'test_builtin_image_sample_2d' needs target feature image-insts}}
}
float4 test_builtin_image_sample_2d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2d_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_2d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2d_2' needs target feature image-insts}}
}
float test_builtin_image_sample_2darray(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2darray_f32_f32(100, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray' needs target feature image-insts}}
}
float4 test_builtin_image_sample_2darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2darray_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_2darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_2darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2darray_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2darray_v4f16_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_2darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_1d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f32_f32(f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1d_v4f32_f32(i32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_1d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1d_v4f16_f32(f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1d_v4f16_f32(100, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_1d_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_1darray_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f32_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1darray_v4f32_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_1darray_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_1darray_v4f16_f32(f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_1darray_v4f16_f32(100, f32, f32, tex, vec4i32, 0, i32, 110); //expected-error{{'test_builtin_image_sample_1darray_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_3d_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_3d_v4f32_f32(100, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_3d_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_3d_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_3d_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, i32); //expected-error{{'test_builtin_image_sample_3d_2' needs target feature image-insts}}
}
float4 test_builtin_image_sample_cube_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f32_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_cube_v4f32_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_cube_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_cube_v4f16_f32(f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_cube_v4f16_f32(i32, f32, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_cube_2' needs target feature image-insts}}
}
float test_builtin_image_sample_2d_gfx(float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_f32_f32(f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx' needs target feature image-insts}}
}
float4 test_builtin_image_sample_2d_gfx_1(float4 v4f32, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f32_f32(f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f32_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_1' needs target feature image-insts}}
}
half4 test_builtin_image_sample_2d_gfx_2(half4 v4f16, float f32, int i32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(100, f32, f32, tex, vec4i32, 0, 120, 110); //expected-error{{'test_builtin_image_sample_2d_gfx_2' needs target feature image-insts}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl b/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl
index 11dcceedf471d..949c780432e90 100644
--- a/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-store-param-gfx1100-err.cl
@@ -6,124 +6,172 @@ typedef half half4 __attribute__((ext_vector_type(4)));
void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, tex, 106, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, i32, tex, 106, 103); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, 15, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 15, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2d_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_2d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_f32_i32(f32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2darray_f32_i32(f32, 15, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, 15, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 15, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_2darray_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_2darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 15, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_1d_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_1d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 100, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, 15, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 15, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_1darray_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_1darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 100, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_3d_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_3d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_cube_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_cube_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 15, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1d_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_mip_1d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 15, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_1darray_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_mip_1darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, 15, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2d_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_mip_2d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, 15, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 15, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_2darray_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_mip_2darray_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_3d_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_mip_3d_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f32_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f32_i32' must be a constant integer}}
}
void test_builtin_image_store_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f16_i32' must be a constant integer}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 15, i32, i32, i32, i32, tex, 120, i32); //expected-error{{argument to '__builtin_amdgcn_image_store_mip_cube_v4f16_i32' must be a constant integer}}
+}
+void test_builtin_image_store_mip_cube_dmask_range(half4 v4f16, int i32, __amdgpu_texture_t tex) {
+
+ return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, 110); //expected-error{{argument value 100 is outside the valid range [0, 15]}}
}
diff --git a/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl b/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl
index a9ff602fa08e7..d0085e5403b5f 100644
--- a/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl
+++ b/clang/test/SemaOpenCL/builtins-image-store-param-gfx942-err.cl
@@ -6,124 +6,124 @@ typedef half half4 __attribute__((ext_vector_type(4)));
void test_builtin_image_store_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_store_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2d_f32_i32(f32, i32, i32, i32, tex, 106, 103); //expected-error{{'test_builtin_image_store_2d' needs target feature image-insts}}
}
void test_builtin_image_store_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2d_v4f32_i32(v4f32, 100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2d_1' needs target feature image-insts}}
}
void test_builtin_image_store_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2d_2' needs target feature image-insts}}
}
void test_builtin_image_store_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_f32_i32(f32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2darray_f32_i32(f32, 100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray' needs target feature image-insts}}
}
void test_builtin_image_store_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_2darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_2darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_1' needs target feature image-insts}}
}
void test_builtin_image_store_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1d_v4f16_i32(v4f16, 100, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_1d_2' needs target feature image-insts}}
}
void test_builtin_image_store_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1darray_v4f32_i32(v4f32, 100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_1darray_v4f16_i32(v4f16, 100, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_1darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_3d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_1' needs target feature image-insts}}
}
void test_builtin_image_store_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_3d_2' needs target feature image-insts}}
}
void test_builtin_image_store_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_1' needs target feature image-insts}}
}
void test_builtin_image_store_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_cube_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1d_v4f16_i32(v4f16, 100, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_1d_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_1darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_1darray_v4f16_i32(v4f16, 100, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_1darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2d(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2d_f32_i32(f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f32_i32(v4f32, 100, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2d_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2d_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2darray(float f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2darray' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2darray_f32_i32(f32, i32, i32, i32, i32, i32, tex, 120, 110); //expected-error{{'test_builtin_image_store_mip_2darray' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2darray_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f32_i32(v4f32, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_2darray_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_2darray_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_2darray_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_3d_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_3d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_3d_v4f16_i32(v4f16, i32, i32, i32, i32, i32, tex, i32, 110); //expected-error{{'test_builtin_image_store_mip_3d_2' needs target feature image-insts}}
}
void test_builtin_image_store_mip_cube_1(float4 v4f32, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_1' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f32_i32(v4f32, i32, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_1' needs target feature image-insts}}
}
void test_builtin_image_store_mip_cube_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_2' needs target feature image-insts}}
+ return __builtin_amdgcn_image_store_mip_cube_v4f16_i32(v4f16, 100, i32, i32, i32, i32, tex, 120, i32); //expected-error{{'test_builtin_image_store_mip_cube_2' needs target feature image-insts}}
}
diff --git a/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl b/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl
index 77d016e0e76e4..c65891e9c4433 100644
--- a/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl
+++ b/clang/test/SemaOpenCL/half-float16-vector-compatibility.cl
@@ -51,11 +51,11 @@ float16_4 test_explicit_cast_float16_4_to_half4(float16_4 f16_4) {
}
half4 test_builtin_image_load_2d_2(half4 v4f16, int i32, __amdgpu_texture_t tex) {
- return __builtin_amdgcn_image_load_2d_v4f16_i32(i32, i32, tex, 120, 110); // expected-no-error
+ return __builtin_amdgcn_image_load_2d_v4f16_i32(15, i32, i32, tex, 120, 110); // expected-no-error
}
half4 test_builtin_amdgcn_image_sample_2d_v4f16_f32(half4 v4f16, int i32, float f32, __amdgpu_texture_t tex, int4 vec4i32) {
- return __builtin_amdgcn_image_sample_2d_v4f16_f32(f32, f32, tex, vec4i32, 0, 120, 110); // expected-no-error
+ return __builtin_amdgcn_image_sample_2d_v4f16_f32(15, f32, f32, tex, vec4i32, 0, 120, 110); // expected-no-error
}
void test_half_mismatch_vector_size_error(float16_2 f16_2, float16_3 f16_3, float16_4 f16_4, float16_8 f16_8, float16_16 f16_16) {
>From c34e10acaa7fb84f0c734f944ac1b839ccd0f5af Mon Sep 17 00:00:00 2001
From: Alexey Sachkov <alexey.sachkov at amd.com>
Date: Tue, 17 Feb 2026 03:24:57 -0600
Subject: [PATCH 3/3] Drop legacy assertion
Apparently I was building with assertions disabled locally, which is now
fixed.
---
clang/lib/Sema/SemaAMDGPU.cpp | 4 ----
1 file changed, 4 deletions(-)
diff --git a/clang/lib/Sema/SemaAMDGPU.cpp b/clang/lib/Sema/SemaAMDGPU.cpp
index bb0da893b7198..92ced16600cdb 100644
--- a/clang/lib/Sema/SemaAMDGPU.cpp
+++ b/clang/lib/Sema/SemaAMDGPU.cpp
@@ -265,10 +265,6 @@ bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
const Type *RetTy = TheCall->getType().getTypePtr();
if (auto *VTy = dyn_cast<VectorType>(RetTy))
NumElementsInRetTy = VTy->getNumElements();
- assert(
- High <= 4 &&
- "DMask is a 4-bit value in underlying LLVM IR intrinsics (and HW "
- "instructions). This assert indicates malformed built-in declaration");
int NumActiveBitsInDMask =
llvm::popcount(static_cast<uint8_t>(Result.getExtValue()));
if (NumActiveBitsInDMask > NumElementsInRetTy) {
More information about the cfe-commits
mailing list