[llvm] d627012 - AMDGPU/GlobalISel: Remove some problematic testcases
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 30 14:05:38 PDT 2021
Author: Matt Arsenault
Date: 2021-06-30T17:05:29-04:00
New Revision: d6270125fc2dd771973f20c33bdb7fd9f91b51d6
URL: https://github.com/llvm/llvm-project/commit/d6270125fc2dd771973f20c33bdb7fd9f91b51d6
DIFF: https://github.com/llvm/llvm-project/commit/d6270125fc2dd771973f20c33bdb7fd9f91b51d6.diff
LOG: AMDGPU/GlobalISel: Remove some problematic testcases
These testcases are a bit nonsensical and won't be handled correctly
for a long time. Remove them to unblock load/store legalization work.
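For context, all three removed tests store a <4 x s8> register value with a <4 x s5> memory type (a 20-bit store of non-byte-sized vector elements) and differ only in the alignment of the store. Their input MIR follows this pattern, excerpted from the diff below:

    %0:_(p1) = COPY $vgpr0_vgpr1
    %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
    %2:_(<4 x s8>) = G_TRUNC %1
    G_STORE %2, %0 :: (store (<4 x s5>), addrspace 1, align 1)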
Added:
Modified:
llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
index 85c71f3cfe88..ae7a829b12af 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
@@ -1072,154 +1072,3 @@ body: |
G_STORE %2, %0 :: (store (<3 x s4>), addrspace 1, align 2)
...
-
----
-name: test_truncstore_global_v4s8_to_v4s5_align1
-body: |
- bb.0:
- liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
-
- ; SI-LABEL: name: test_truncstore_global_v4s8_to_v4s5_align1
- ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
- ; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
- ; SI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
- ; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
- ; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
- ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
- ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
- ; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C2]]
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY2]](s32)
- ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
- ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC2]]
- ; SI: [[COPY4:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
- ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY4]](s16)
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[C1]](s32)
- ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY4]](s16)
- ; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
- ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; SI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
- ; SI: G_STORE [[COPY5]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
- ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C4]](s64)
- ; SI: [[COPY6:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
- ; SI: G_STORE [[COPY6]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
- ; VI-LABEL: name: test_truncstore_global_v4s8_to_v4s5_align1
- ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
- ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
- ; VI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
- ; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
- ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
- ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
- ; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
- ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
- ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
- ; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
- ; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[COPY2]], [[C1]](s16)
- ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
- ; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s8), addrspace 1)
- ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
- ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
- ; VI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
- ; VI: G_STORE [[ANYEXT1]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 1, addrspace 1)
- ; VI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI: [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
- ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p1) :: (store (s8) into unknown-address + 2, addrspace 1)
- %0:_(p1) = COPY $vgpr0_vgpr1
- %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- %2:_(<4 x s8>) = G_TRUNC %1
- G_STORE %2, %0 :: (store (<4 x s5>), addrspace 1, align 1)
-
-...
-
----
-name: test_truncstore_global_v4s8_to_v4s5_align2
-body: |
- bb.0:
- liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
-
- ; SI-LABEL: name: test_truncstore_global_v4s8_to_v4s5_align2
- ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
- ; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- ; SI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
- ; SI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
- ; SI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; SI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
- ; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
- ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[UV1]](s32)
- ; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C1]](s32)
- ; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
- ; SI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC2]]
- ; SI: [[COPY3:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
- ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY3]](s16)
- ; SI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
- ; SI: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; SI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C3]](s64)
- ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
- ; SI: G_STORE [[COPY4]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
- ; VI-LABEL: name: test_truncstore_global_v4s8_to_v4s5_align2
- ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
- ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- ; VI: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>)
- ; VI: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[UV2]](s32)
- ; VI: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; VI: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
- ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[UV]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC1]], [[C]]
- ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[UV1]](s32)
- ; VI: [[AND1:%[0-9]+]]:_(s16) = G_AND [[TRUNC2]], [[C]]
- ; VI: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C1]](s16)
- ; VI: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL]]
- ; VI: [[COPY2:%[0-9]+]]:_(s16) = COPY [[OR]](s16)
- ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY2]](s16)
- ; VI: G_STORE [[ANYEXT]](s32), [[COPY]](p1) :: (store (s16), addrspace 1)
- ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; VI: [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[COPY]], [[C2]](s64)
- ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UV2]](s32)
- ; VI: G_STORE [[COPY3]](s32), [[PTR_ADD]](p1) :: (store (s8) into unknown-address + 2, align 2, addrspace 1)
- %0:_(p1) = COPY $vgpr0_vgpr1
- %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- %2:_(<4 x s8>) = G_TRUNC %1
- G_STORE %2, %0 :: (store (<4 x s5>), addrspace 1, align 2)
-
-...
-
----
-name: test_truncstore_global_v4s8_to_v4s5_align4
-body: |
- bb.0:
- liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
-
- ; SI-LABEL: name: test_truncstore_global_v4s8_to_v4s5_align4
- ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
- ; SI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- ; SI: [[TRUNC:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[COPY1]](<4 x s32>)
- ; SI: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[TRUNC]](<4 x s8>)
- ; SI: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store (<4 x s5>), align 4, addrspace 1)
- ; VI-LABEL: name: test_truncstore_global_v4s8_to_v4s5_align4
- ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
- ; VI: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- ; VI: [[TRUNC:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[COPY1]](<4 x s32>)
- ; VI: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[TRUNC]](<4 x s8>)
- ; VI: G_STORE [[BITCAST]](s32), [[COPY]](p1) :: (store (<4 x s5>), align 4, addrspace 1)
- %0:_(p1) = COPY $vgpr0_vgpr1
- %1:_(<4 x s32>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
- %2:_(<4 x s8>) = G_TRUNC %1
- G_STORE %2, %0 :: (store (<4 x s5>), addrspace 1, align 4)
-
-...