[llvm] Pre-commit AMDGPU tests for masked load/store/scatter/gather (PR #104645)
Krzysztof Drewniak via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 19 08:29:13 PDT 2024
https://github.com/krzysz00 updated https://github.com/llvm/llvm-project/pull/104645
From 084c02db4c6fd52e0323c6097d05ac5e67310e87 Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <Krzysztof.Drewniak at amd.com>
Date: Fri, 16 Aug 2024 22:06:04 +0000
Subject: [PATCH 1/2] Pre-commit AMDGPU tests for masked
load/store/scatter/gather
I'm planning to fix the masked operation scalarizer so that it doesn't generate
suboptimal code on AMD GPUs and other SIMT machines, and so am adding
tests now.
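For illustration only (a rough sketch, not part of this patch): the pass
currently lowers a <2 x i1> mask by bitcasting it to an i2 scalar and testing
individual bits, as the autogenerated checks below show, roughly

  %scalar_mask = bitcast <2 x i1> %mask to i2
  %bit0 = and i2 %scalar_mask, 1
  %cond0 = icmp ne i2 %bit0, 0
  br i1 %cond0, label %cond.load, label %else

whereas on a SIMT target, where each i1 already occupies at least one register,
one could instead test each lane directly, e.g.

  %cond0 = extractelement <2 x i1> %mask, i64 0
  br i1 %cond0, label %cond.load, label %else

The extractelement form is only an assumption about the planned follow-up
change; these pre-commit tests capture the existing expansion.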
---
.../AMDGPU/expamd-masked-load.ll | 58 +++++++++++++++++++
.../AMDGPU/expand-masked-gather.ll | 38 ++++++++++++
.../AMDGPU/expand-masked-scatter.ll | 36 ++++++++++++
.../AMDGPU/expand-masked-store.ll | 55 ++++++++++++++++++
.../AMDGPU/lit.local.cfg | 2 +
5 files changed, 189 insertions(+)
create mode 100644 llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll
create mode 100644 llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-gather.ll
create mode 100644 llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-scatter.ll
create mode 100644 llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll
create mode 100644 llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/lit.local.cfg
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll
new file mode 100644
index 00000000000000..cae48f84a56f0e
--- /dev/null
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; COM: Test that, unlike on CPU targets, the mask doesn't get bitcast to a scalar,
+; COM: since, on GPUs, each i1 takes up at least one register and so they should
+; COM: be treated separately.
+
+define <2 x i32> @scalarize_v2i32(ptr %p, <2 x i1> %mask, <2 x i32> %passthru) {
+; CHECK-LABEL: define <2 x i32> @scalarize_v2i32(
+; CHECK-SAME: ptr [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x i32> [[PASSTHRU:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_LOAD:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_LOAD]]:
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> [[PASSTHRU]], i32 [[TMP4]], i64 0
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x i32> [ [[TMP5]], %[[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-NEXT: br i1 [[TMP7]], label %[[COND_LOAD1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_LOAD1]]:
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i32> [[RES_PHI_ELSE]], i32 [[TMP9]], i64 1
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x i32> [ [[TMP10]], %[[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], %[[ELSE]] ]
+; CHECK-NEXT: ret <2 x i32> [[RES_PHI_ELSE3]]
+;
+ %ret = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %p, i32 128, <2 x i1> %mask, <2 x i32> %passthru)
+ ret <2 x i32> %ret
+}
+
+define <2 x i32> @scalarize_v2i32_splat_mask(ptr %p, i1 %mask, <2 x i32> %passthrough) {
+; CHECK-LABEL: define <2 x i32> @scalarize_v2i32_splat_mask(
+; CHECK-SAME: ptr [[P:%.*]], i1 [[MASK:%.*]], <2 x i32> [[PASSTHROUGH:%.*]]) {
+; CHECK-NEXT: [[MASK_VEC:%.*]] = insertelement <2 x i1> poison, i1 [[MASK]], i32 0
+; CHECK-NEXT: [[MASK_SPLAT:%.*]] = shufflevector <2 x i1> [[MASK_VEC]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[MASK_SPLAT_FIRST:%.*]] = extractelement <2 x i1> [[MASK_SPLAT]], i64 0
+; CHECK-NEXT: br i1 [[MASK_SPLAT_FIRST]], label %[[COND_LOAD:.*]], label %[[BB1:.*]]
+; CHECK: [[COND_LOAD]]:
+; CHECK-NEXT: [[RET_COND_LOAD:%.*]] = load <2 x i32>, ptr [[P]], align 8
+; CHECK-NEXT: br label %[[BB1]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: [[RET:%.*]] = phi <2 x i32> [ [[RET_COND_LOAD]], %[[COND_LOAD]] ], [ [[PASSTHROUGH]], [[TMP0:%.*]] ]
+; CHECK-NEXT: ret <2 x i32> [[RET]]
+;
+ %mask.vec = insertelement <2 x i1> poison, i1 %mask, i32 0
+ %mask.splat = shufflevector <2 x i1> %mask.vec, <2 x i1> poison, <2 x i32> zeroinitializer
+ %ret = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %p, i32 8, <2 x i1> %mask.splat, <2 x i32> %passthrough)
+ ret <2 x i32> %ret
+}
+
+declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-gather.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-gather.ll
new file mode 100644
index 00000000000000..94d0e2943d9366
--- /dev/null
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-gather.ll
@@ -0,0 +1,38 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; COM: Test that, unlike on CPU targets, the mask doesn't get bitcast to a scalar,
+; COM: since, on GPUs, each i1 takes up at least one register and so they should
+; COM: be treated separately.
+
+define <2 x i32> @scalarize_v2i32(<2 x ptr> %p, <2 x i1> %mask, <2 x i32> %passthru) {
+; CHECK-LABEL: define <2 x i32> @scalarize_v2i32(
+; CHECK-SAME: <2 x ptr> [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x i32> [[PASSTHRU:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_LOAD:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_LOAD]]:
+; CHECK-NEXT: [[PTR0:%.*]] = extractelement <2 x ptr> [[P]], i64 0
+; CHECK-NEXT: [[LOAD0:%.*]] = load i32, ptr [[PTR0]], align 8
+; CHECK-NEXT: [[RES0:%.*]] = insertelement <2 x i32> [[PASSTHRU]], i32 [[LOAD0]], i64 0
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x i32> [ [[RES0]], %[[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i2 [[TMP3]], 0
+; CHECK-NEXT: br i1 [[TMP4]], label %[[COND_LOAD1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_LOAD1]]:
+; CHECK-NEXT: [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
+; CHECK-NEXT: [[LOAD1:%.*]] = load i32, ptr [[PTR1]], align 8
+; CHECK-NEXT: [[RES1:%.*]] = insertelement <2 x i32> [[RES_PHI_ELSE]], i32 [[LOAD1]], i64 1
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x i32> [ [[RES1]], %[[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], %[[ELSE]] ]
+; CHECK-NEXT: ret <2 x i32> [[RES_PHI_ELSE3]]
+;
+ %ret = call <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr> %p, i32 8, <2 x i1> %mask, <2 x i32> %passthru)
+ ret <2 x i32> %ret
+}
+
+declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>)
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-scatter.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-scatter.ll
new file mode 100644
index 00000000000000..45debf35d06e4f
--- /dev/null
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-scatter.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; COM: Test that, unlike on CPU targets, the mask doesn't get bitcast to a scalar,
+; COM: since, on GPUs, each i1 takes up at least one register and so they should
+; COM: be treated separately.
+
+define void @scalarize_v2i32(<2 x ptr> %p, <2 x i1> %mask, <2 x i32> %value) {
+; CHECK-LABEL: define void @scalarize_v2i32(
+; CHECK-SAME: <2 x ptr> [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x i32> [[VALUE:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_STORE:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_STORE]]:
+; CHECK-NEXT: [[ELT0:%.*]] = extractelement <2 x i32> [[VALUE]], i64 0
+; CHECK-NEXT: [[PTR0:%.*]] = extractelement <2 x ptr> [[P]], i64 0
+; CHECK-NEXT: store i32 [[ELT0]], ptr [[PTR0]], align 8
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[TMP3:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i2 [[TMP3]], 0
+; CHECK-NEXT: br i1 [[TMP4]], label %[[COND_STORE1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_STORE1]]:
+; CHECK-NEXT: [[ELT1:%.*]] = extractelement <2 x i32> [[VALUE]], i64 1
+; CHECK-NEXT: [[PTR1:%.*]] = extractelement <2 x ptr> [[P]], i64 1
+; CHECK-NEXT: store i32 [[ELT1]], ptr [[PTR1]], align 8
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.scatter.v2i32.v2p0(<2 x i32> %value, <2 x ptr> %p, i32 8, <2 x i1> %mask)
+ ret void
+}
+
+declare void @llvm.masked.scatter.v2i32.v2p0(<2 x i32>, <2 x ptr>, i32, <2 x i1>)
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll
new file mode 100644
index 00000000000000..3c2f2eed73f840
--- /dev/null
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S %s -passes=scalarize-masked-mem-intrin -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+; COM: Test that, unlike on CPU targets, the mask doesn't get bitcast to a scalar,
+; COM: since, on GPUs, each i1 takes up at least one register and so they should
+; COM: be treated separately.
+
+define void @scalarize_v2i32(ptr %p, <2 x i1> %mask, <2 x i32> %data) {
+; CHECK-LABEL: define void @scalarize_v2i32(
+; CHECK-SAME: ptr [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x i32> [[DATA:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_STORE:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_STORE]]:
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[DATA]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 0
+; CHECK-NEXT: store i32 [[TMP3]], ptr [[TMP4]], align 4
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[COND_STORE1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_STORE1]]:
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[DATA]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[P]], i32 1
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP8]], align 4
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.store.v2i32.p0(<2 x i32> %data, ptr %p, i32 128, <2 x i1> %mask)
+ ret void
+}
+
+define void @scalarize_v2i32_splat_mask(ptr %p, <2 x i32> %data, i1 %mask) {
+; CHECK-LABEL: define void @scalarize_v2i32_splat_mask(
+; CHECK-SAME: ptr [[P:%.*]], <2 x i32> [[DATA:%.*]], i1 [[MASK:%.*]]) {
+; CHECK-NEXT: [[MASK_VEC:%.*]] = insertelement <2 x i1> poison, i1 [[MASK]], i32 0
+; CHECK-NEXT: [[MASK_SPLAT:%.*]] = shufflevector <2 x i1> [[MASK_VEC]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[MASK_SPLAT_FIRST:%.*]] = extractelement <2 x i1> [[MASK_SPLAT]], i64 0
+; CHECK-NEXT: br i1 [[MASK_SPLAT_FIRST]], label %[[COND_STORE:.*]], label %[[BB1:.*]]
+; CHECK: [[COND_STORE]]:
+; CHECK-NEXT: store <2 x i32> [[DATA]], ptr [[P]], align 8
+; CHECK-NEXT: br label %[[BB1]]
+; CHECK: [[BB1]]:
+; CHECK-NEXT: ret void
+;
+ %mask.vec = insertelement <2 x i1> poison, i1 %mask, i32 0
+ %mask.splat = shufflevector <2 x i1> %mask.vec, <2 x i1> poison, <2 x i32> zeroinitializer
+ call void @llvm.masked.store.v2i32.p0(<2 x i32> %data, ptr %p, i32 8, <2 x i1> %mask.splat)
+ ret void
+}
+
+declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/lit.local.cfg b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/lit.local.cfg
new file mode 100644
index 00000000000000..7c492428aec761
--- /dev/null
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "AMDGPU" in config.root.targets:
+ config.unsupported = True
From d775b768818d5e5bc3bdc4f8a4797f82841f0c58 Mon Sep 17 00:00:00 2001
From: Krzysztof Drewniak <Krzysztof.Drewniak at amd.com>
Date: Mon, 19 Aug 2024 15:28:59 +0000
Subject: [PATCH 2/2] Add a few more tests
---
.../AMDGPU/expamd-masked-load.ll | 62 +++++++++++++++++++
.../AMDGPU/expand-masked-store.ll | 58 +++++++++++++++++
2 files changed, 120 insertions(+)
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll
index cae48f84a56f0e..668ced085a685a 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expamd-masked-load.ll
@@ -55,4 +55,66 @@ define <2 x i32> @scalarize_v2i32_splat_mask(ptr %p, i1 %mask, <2 x i32> %passth
ret <2 x i32> %ret
}
+define <2 x half> @scalarize_v2f16(ptr %p, <2 x i1> %mask, <2 x half> %passthru) {
+; CHECK-LABEL: define <2 x half> @scalarize_v2f16(
+; CHECK-SAME: ptr [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x half> [[PASSTHRU:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_LOAD:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_LOAD]]:
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds half, ptr [[P]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr [[TMP3]], align 2
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x half> [[PASSTHRU]], half [[TMP4]], i64 0
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x half> [ [[TMP5]], %[[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-NEXT: br i1 [[TMP7]], label %[[COND_LOAD1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_LOAD1]]:
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, ptr [[P]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load half, ptr [[TMP8]], align 2
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x half> [[RES_PHI_ELSE]], half [[TMP9]], i64 1
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x half> [ [[TMP10]], %[[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], %[[ELSE]] ]
+; CHECK-NEXT: ret <2 x half> [[RES_PHI_ELSE3]]
+;
+ %ret = call <2 x half> @llvm.masked.load.v2f16.p0(ptr %p, i32 128, <2 x i1> %mask, <2 x half> %passthru)
+ ret <2 x half> %ret
+}
+
+define <2 x i32> @scalarize_v2i32_p3(ptr addrspace(3) %p, <2 x i1> %mask, <2 x i32> %passthru) {
+; CHECK-LABEL: define <2 x i32> @scalarize_v2i32_p3(
+; CHECK-SAME: ptr addrspace(3) [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x i32> [[PASSTHRU:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_LOAD:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_LOAD]]:
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[P]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr addrspace(3) [[TMP3]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> [[PASSTHRU]], i32 [[TMP4]], i64 0
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[RES_PHI_ELSE:%.*]] = phi <2 x i32> [ [[TMP5]], %[[COND_LOAD]] ], [ [[PASSTHRU]], [[TMP0:%.*]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ne i2 [[TMP6]], 0
+; CHECK-NEXT: br i1 [[TMP7]], label %[[COND_LOAD1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_LOAD1]]:
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[P]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr addrspace(3) [[TMP8]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i32> [[RES_PHI_ELSE]], i32 [[TMP9]], i64 1
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: [[RES_PHI_ELSE3:%.*]] = phi <2 x i32> [ [[TMP10]], %[[COND_LOAD1]] ], [ [[RES_PHI_ELSE]], %[[ELSE]] ]
+; CHECK-NEXT: ret <2 x i32> [[RES_PHI_ELSE3]]
+;
+ %ret = call <2 x i32> @llvm.masked.load.v2i32.p3(ptr addrspace(3) %p, i32 128, <2 x i1> %mask, <2 x i32> %passthru)
+ ret <2 x i32> %ret
+}
+
declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
+declare <2 x half> @llvm.masked.load.v2f16.p0(ptr, i32, <2 x i1>, <2 x half>)
+declare <2 x i32> @llvm.masked.load.v2i32.p3(ptr addrspace(3), i32, <2 x i1>, <2 x i32>)
diff --git a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll
index 3c2f2eed73f840..35712bab881dc3 100644
--- a/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll
+++ b/llvm/test/Transforms/ScalarizeMaskedMemIntrin/AMDGPU/expand-masked-store.ll
@@ -52,4 +52,62 @@ define void @scalarize_v2i32_splat_mask(ptr %p, <2 x i32> %data, i1 %mask) {
ret void
}
+define void @scalarize_v2f16(ptr %p, <2 x i1> %mask, <2 x half> %data) {
+; CHECK-LABEL: define void @scalarize_v2f16(
+; CHECK-SAME: ptr [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x half> [[DATA:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_STORE:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_STORE]]:
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x half> [[DATA]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds half, ptr [[P]], i32 0
+; CHECK-NEXT: store half [[TMP3]], ptr [[TMP4]], align 2
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[COND_STORE1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_STORE1]]:
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x half> [[DATA]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds half, ptr [[P]], i32 1
+; CHECK-NEXT: store half [[TMP7]], ptr [[TMP8]], align 2
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.store.v2f16.p0(<2 x half> %data, ptr %p, i32 128, <2 x i1> %mask)
+ ret void
+}
+
+define void @scalarize_v2i32_p3(ptr addrspace(3) %p, <2 x i1> %mask, <2 x i32> %data) {
+; CHECK-LABEL: define void @scalarize_v2i32_p3(
+; CHECK-SAME: ptr addrspace(3) [[P:%.*]], <2 x i1> [[MASK:%.*]], <2 x i32> [[DATA:%.*]]) {
+; CHECK-NEXT: [[SCALAR_MASK:%.*]] = bitcast <2 x i1> [[MASK]] to i2
+; CHECK-NEXT: [[TMP1:%.*]] = and i2 [[SCALAR_MASK]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i2 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[TMP2]], label %[[COND_STORE:.*]], label %[[ELSE:.*]]
+; CHECK: [[COND_STORE]]:
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i32> [[DATA]], i64 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[P]], i32 0
+; CHECK-NEXT: store i32 [[TMP3]], ptr addrspace(3) [[TMP4]], align 4
+; CHECK-NEXT: br label %[[ELSE]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[TMP5:%.*]] = and i2 [[SCALAR_MASK]], -2
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i2 [[TMP5]], 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[COND_STORE1:.*]], label %[[ELSE2:.*]]
+; CHECK: [[COND_STORE1]]:
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[DATA]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr addrspace(3) [[P]], i32 1
+; CHECK-NEXT: store i32 [[TMP7]], ptr addrspace(3) [[TMP8]], align 4
+; CHECK-NEXT: br label %[[ELSE2]]
+; CHECK: [[ELSE2]]:
+; CHECK-NEXT: ret void
+;
+ call void @llvm.masked.store.v2i32.p3(<2 x i32> %data, ptr addrspace(3) %p, i32 128, <2 x i1> %mask)
+ ret void
+}
+
declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)
+declare void @llvm.masked.store.v2f16.p0(<2 x half>, ptr, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i32.p3(<2 x i32>, ptr addrspace(3), i32, <2 x i1>)