[llvm] r271082 - AMDGPU: Cleanup vector insert/extract tests
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Fri May 27 17:51:10 PDT 2016
Author: arsenm
Date: Fri May 27 19:51:06 2016
New Revision: 271082
URL: http://llvm.org/viewvc/llvm-project?rev=271082&view=rev
Log:
AMDGPU: Cleanup vector insert/extract tests
This mostly makes sure that dynamic inserts into and extracts
from 3-element vectors are covered.
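
For reference, a dynamic (variable-index) extract and insert on a 3-element vector in
LLVM IR look like the minimal sketch below. It mirrors the dyn_extract_vector_elt_v3i64
and dynamic_insertelement_v3i32 tests touched by this revision; the function and value
names used here are illustrative only.

; Extract the element at a runtime index from a <3 x i64> and store it.
define void @dyn_extract_v3i64_sketch(i64 addrspace(1)* %out, <3 x i64> %vec, i32 %idx) nounwind {
  %elt = extractelement <3 x i64> %vec, i32 %idx
  store volatile i64 %elt, i64 addrspace(1)* %out
  ret void
}

; Insert a constant at a runtime index into a <3 x i32> and store the result.
define void @dyn_insert_v3i32_sketch(<3 x i32> addrspace(1)* %out, <3 x i32> %vec, i32 %idx) nounwind {
  %ins = insertelement <3 x i32> %vec, i32 5, i32 %idx
  store <3 x i32> %ins, <3 x i32> addrspace(1)* %out, align 16
  ret void
}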
Added:
    llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
    llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
    llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
      - copied, changed from r271081, llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll
      - copied, changed from r271081, llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll
Removed:
    llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll
    llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll
    llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll
Modified:
    llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
Removed: llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll?rev=271081&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll (removed)
@@ -1,43 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-
-; How the replacement of i64 stores with v2i32 stores resulted in
-; breaking other users of the bitcast if they already existed
-
-; GCN-LABEL: {{^}}extract_vector_elt_select_error:
-; GCN: buffer_store_dword
-; GCN: buffer_store_dword
-; GCN: buffer_store_dwordx2
-define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) nounwind {
- %vec = bitcast i64 %val to <2 x i32>
- %elt0 = extractelement <2 x i32> %vec, i32 0
- %elt1 = extractelement <2 x i32> %vec, i32 1
-
- store volatile i32 %elt0, i32 addrspace(1)* %out
- store volatile i32 %elt1, i32 addrspace(1)* %out
- store volatile i64 %val, i64 addrspace(1)* %in
- ret void
-}
-
-
-define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) nounwind {
- %p0 = extractelement <2 x i64> %foo, i32 0
- %p1 = extractelement <2 x i64> %foo, i32 1
- %out1 = getelementptr i64, i64 addrspace(1)* %out, i32 1
- store volatile i64 %p1, i64 addrspace(1)* %out
- store volatile i64 %p0, i64 addrspace(1)* %out1
- ret void
-}
-
-define void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) nounwind {
- %dynelt = extractelement <2 x i64> %foo, i32 %elt
- store volatile i64 %dynelt, i64 addrspace(1)* %out
- ret void
-}
-
-define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) nounwind {
- %load = load volatile <2 x i64>, <2 x i64> addrspace(1)* %foo
- %or = or <2 x i64> %load, %arst
- %dynelt = extractelement <2 x i64> %or, i32 %elt
- store volatile i64 %dynelt, i64 addrspace(1)* %out
- ret void
-}
Removed: llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll?rev=271081&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll (removed)
@@ -1,111 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v1i8:
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 {
- %p0 = extractelement <1 x i8> %foo, i32 0
- store i8 %p0, i8 addrspace(1)* %out
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v2i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 {
- %p0 = extractelement <2 x i8> %foo, i32 0
- %p1 = extractelement <2 x i8> %foo, i32 1
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v3i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 {
- %p0 = extractelement <3 x i8> %foo, i32 0
- %p1 = extractelement <3 x i8> %foo, i32 2
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v4i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 {
- %p0 = extractelement <4 x i8> %foo, i32 0
- %p1 = extractelement <4 x i8> %foo, i32 2
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v8i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 {
- %p0 = extractelement <8 x i8> %foo, i32 0
- %p1 = extractelement <8 x i8> %foo, i32 2
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v16i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0 {
- %p0 = extractelement <16 x i8> %foo, i32 0
- %p1 = extractelement <16 x i8> %foo, i32 2
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v32i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0 {
- %p0 = extractelement <32 x i8> %foo, i32 0
- %p1 = extractelement <32 x i8> %foo, i32 2
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v64i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0 {
- %p0 = extractelement <64 x i8> %foo, i32 0
- %p1 = extractelement <64 x i8> %foo, i32 2
- %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
- store i8 %p1, i8 addrspace(1)* %out
- store i8 %p0, i8 addrspace(1)* %out1
- ret void
-}
-
-attributes #0 = { nounwind }
Added: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll?rev=271082&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll Fri May 27 19:51:06 2016
@@ -0,0 +1,29 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}extract_vector_elt_v3f64_2:
+; GCN: buffer_load_dwordx4
+; GCN: buffer_load_dwordx2
+; GCN: buffer_store_dwordx2
+define void @extract_vector_elt_v3f64_2(double addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 {
+ %ld = load volatile <3 x double>, <3 x double> addrspace(1)* %in
+ %elt = extractelement <3 x double> %ld, i32 2
+ store volatile double %elt, double addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3f64:
+define void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %out, <3 x double> %foo, i32 %elt) #0 {
+ %dynelt = extractelement <3 x double> %foo, i32 %elt
+ store volatile double %dynelt, double addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4f64:
+define void @dyn_extract_vector_elt_v4f64(double addrspace(1)* %out, <4 x double> %foo, i32 %elt) #0 {
+ %dynelt = extractelement <4 x double> %foo, i32 %elt
+ store volatile double %dynelt, double addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
Added: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll?rev=271082&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll Fri May 27 19:51:06 2016
@@ -0,0 +1,86 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}extract_vector_elt_v2i16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) #0 {
+ %p0 = extractelement <2 x i16> %foo, i32 0
+ %p1 = extractelement <2 x i16> %foo, i32 1
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10
+ store i16 %p1, i16 addrspace(1)* %out, align 2
+ store i16 %p0, i16 addrspace(1)* %out1, align 2
+ ret void
+}
+
+; FUNC-LABEL: {{^}}extract_vector_elt_v3i16:
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define void @extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo) #0 {
+ %p0 = extractelement <3 x i16> %foo, i32 0
+ %p1 = extractelement <3 x i16> %foo, i32 2
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+ store i16 %p1, i16 addrspace(1)* %out, align 2
+ store i16 %p0, i16 addrspace(1)* %out1, align 2
+ ret void
+}
+
+; FUNC-LABEL: {{^}}extract_vector_elt_v4i16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) #0 {
+ %p0 = extractelement <4 x i16> %foo, i32 0
+ %p1 = extractelement <4 x i16> %foo, i32 2
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10
+ store i16 %p1, i16 addrspace(1)* %out, align 2
+ store i16 %p0, i16 addrspace(1)* %out1, align 2
+ ret void
+}
+
+
+; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v3i16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+
+; GCN: buffer_store_short
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+define void @dynamic_extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo, i32 %idx) #0 {
+ %p0 = extractelement <3 x i16> %foo, i32 %idx
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+ store i16 %p0, i16 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v4i16:
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+; GCN: buffer_store_short
+
+; GCN: buffer_store_short
+; GCN: buffer_load_ushort
+; GCN: buffer_store_short
+define void @dynamic_extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo, i32 %idx) #0 {
+ %p0 = extractelement <4 x i16> %foo, i32 %idx
+ %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
+ store i16 %p0, i16 addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
Copied: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll (from r271081, llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll&p1=llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll&r1=271081&r2=271082&rev=271082&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll Fri May 27 19:51:06 2016
@@ -1,4 +1,5 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; How the replacement of i64 stores with v2i32 stores resulted in
; breaking other users of the bitcast if they already existed
@@ -7,7 +8,7 @@
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dwordx2
-define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) nounwind {
+define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) #0 {
%vec = bitcast i64 %val to <2 x i32>
%elt0 = extractelement <2 x i32> %vec, i32 0
%elt1 = extractelement <2 x i32> %vec, i32 1
@@ -18,8 +19,8 @@ define void @extract_vector_elt_select_e
ret void
}
-
-define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) nounwind {
+; GCN-LABEL: {{^}}extract_vector_elt_v2i64:
+define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) #0 {
%p0 = extractelement <2 x i64> %foo, i32 0
%p1 = extractelement <2 x i64> %foo, i32 1
%out1 = getelementptr i64, i64 addrspace(1)* %out, i32 1
@@ -28,16 +29,34 @@ define void @extract_vector_elt_v2i64(i6
ret void
}
-define void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) nounwind {
+; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64:
+define void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) #0 {
%dynelt = extractelement <2 x i64> %foo, i32 %elt
store volatile i64 %dynelt, i64 addrspace(1)* %out
ret void
}
-define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) nounwind {
+; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64_2:
+define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) #0 {
%load = load volatile <2 x i64>, <2 x i64> addrspace(1)* %foo
%or = or <2 x i64> %load, %arst
%dynelt = extractelement <2 x i64> %or, i32 %elt
store volatile i64 %dynelt, i64 addrspace(1)* %out
ret void
}
+
+; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3i64:
+define void @dyn_extract_vector_elt_v3i64(i64 addrspace(1)* %out, <3 x i64> %foo, i32 %elt) #0 {
+ %dynelt = extractelement <3 x i64> %foo, i32 %elt
+ store volatile i64 %dynelt, i64 addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4i64:
+define void @dyn_extract_vector_elt_v4i64(i64 addrspace(1)* %out, <4 x i64> %foo, i32 %elt) #0 {
+ %dynelt = extractelement <4 x i64> %foo, i32 %elt
+ store volatile i64 %dynelt, i64 addrspace(1)* %out
+ ret void
+}
+
+attributes #0 = { nounwind }
Copied: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll (from r271081, llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll&p1=llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll&r1=271081&r2=271082&rev=271082&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll Fri May 27 19:51:06 2016
@@ -1,9 +1,9 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
; FUNC-LABEL: {{^}}extract_vector_elt_v1i8:
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 {
%p0 = extractelement <1 x i8> %foo, i32 0
store i8 %p0, i8 addrspace(1)* %out
@@ -11,10 +11,10 @@ define void @extract_vector_elt_v1i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v2i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 {
%p0 = extractelement <2 x i8> %foo, i32 0
%p1 = extractelement <2 x i8> %foo, i32 1
@@ -25,10 +25,10 @@ define void @extract_vector_elt_v2i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v3i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 {
%p0 = extractelement <3 x i8> %foo, i32 0
%p1 = extractelement <3 x i8> %foo, i32 2
@@ -39,10 +39,10 @@ define void @extract_vector_elt_v3i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v4i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 {
%p0 = extractelement <4 x i8> %foo, i32 0
%p1 = extractelement <4 x i8> %foo, i32 2
@@ -53,10 +53,10 @@ define void @extract_vector_elt_v4i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v8i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 {
%p0 = extractelement <8 x i8> %foo, i32 0
%p1 = extractelement <8 x i8> %foo, i32 2
@@ -67,10 +67,10 @@ define void @extract_vector_elt_v8i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v16i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0 {
%p0 = extractelement <16 x i8> %foo, i32 0
%p1 = extractelement <16 x i8> %foo, i32 2
@@ -81,10 +81,10 @@ define void @extract_vector_elt_v16i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v32i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0 {
%p0 = extractelement <32 x i8> %foo, i32 0
%p1 = extractelement <32 x i8> %foo, i32 2
@@ -95,10 +95,10 @@ define void @extract_vector_elt_v32i8(i8
}
; FUNC-LABEL: {{^}}extract_vector_elt_v64i8:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
define void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0 {
%p0 = extractelement <64 x i8> %foo, i32 0
%p1 = extractelement <64 x i8> %foo, i32 2
@@ -108,4 +108,44 @@ define void @extract_vector_elt_v64i8(i8
ret void
}
+; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v3i8:
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
+
+; GCN: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+define void @dynamic_extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo, i32 %idx) #0 {
+ %p0 = extractelement <3 x i8> %foo, i32 %idx
+ %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
+ store i8 %p0, i8 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v4i8:
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
+; GCN: buffer_store_byte
+
+; GCN: buffer_store_byte
+; GCN: buffer_load_ubyte
+; GCN: buffer_store_byte
+define void @dynamic_extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo, i32 %idx) #0 {
+ %p0 = extractelement <4 x i8> %foo, i32 %idx
+ %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
+ store i8 %p0, i8 addrspace(1)* %out
+ ret void
+}
+
attributes #0 = { nounwind }
Removed: llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll?rev=271081&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll (removed)
@@ -1,30 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v2i16:
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
-; SI: buffer_store_short
-; SI: buffer_store_short
-define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) nounwind {
- %p0 = extractelement <2 x i16> %foo, i32 0
- %p1 = extractelement <2 x i16> %foo, i32 1
- %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
- store i16 %p1, i16 addrspace(1)* %out, align 2
- store i16 %p0, i16 addrspace(1)* %out1, align 2
- ret void
-}
-
-; FUNC-LABEL: {{^}}extract_vector_elt_v4i16:
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
-; SI: buffer_store_short
-; SI: buffer_store_short
-define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) nounwind {
- %p0 = extractelement <4 x i16> %foo, i32 0
- %p1 = extractelement <4 x i16> %foo, i32 2
- %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
- store i16 %p1, i16 addrspace(1)* %out, align 2
- store i16 %p0, i16 addrspace(1)* %out1, align 2
- ret void
-}
Modified: llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll?rev=271082&r1=271081&r2=271082&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/insert_vector_elt.ll Fri May 27 19:51:06 2016
@@ -1,5 +1,5 @@
-; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=SI %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; FIXME: Broken on evergreen
; FIXME: For some reason the 8 and 16 vectors are being stored as
@@ -9,168 +9,294 @@
; FIXME: Why is the constant moved into the intermediate register and
; not just directly into the vector component?
-; SI-LABEL: {{^}}insertelement_v4f32_0:
-; s_load_dwordx4 s{{[}}[[LOW_REG:[0-9]+]]:
-; v_mov_b32_e32
-; v_mov_b32_e32 [[CONSTREG:v[0-9]+]], 5.000000e+00
-; v_mov_b32_e32 v[[LOW_REG]], [[CONSTREG]]
-; buffer_store_dwordx4 v{{[}}[[LOW_REG]]:
+; GCN-LABEL: {{^}}insertelement_v4f32_0:
+; GCN: s_load_dwordx4 s{{\[}}[[LOW_REG:[0-9]+]]:
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 [[CONSTREG:v[0-9]+]], 0x40a00000
+; GCN-DAG: v_mov_b32_e32 v[[LOW_REG]], [[CONSTREG]]
+; GCN: buffer_store_dwordx4 v{{\[}}[[LOW_REG]]:
define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}insertelement_v4f32_1:
+; GCN-LABEL: {{^}}insertelement_v4f32_1:
define void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}insertelement_v4f32_2:
+; GCN-LABEL: {{^}}insertelement_v4f32_2:
define void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}insertelement_v4f32_3:
+; GCN-LABEL: {{^}}insertelement_v4f32_3:
define void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}insertelement_v4i32_0:
+; GCN-LABEL: {{^}}insertelement_v4i32_0:
define void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind {
%vecins = insertelement <4 x i32> %a, i32 999, i32 0
store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v2f32:
-; SI: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
-; SI: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
-; SI: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
+; GCN-LABEL: {{^}}insertelement_v3f32_1:
+define void @insertelement_v3f32_1(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+ %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 1
+ store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; GCN-LABEL: {{^}}insertelement_v3f32_2:
+define void @insertelement_v3f32_2(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+ %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 2
+ store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; GCN-LABEL: {{^}}insertelement_v3f32_3:
+define void @insertelement_v3f32_3(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind {
+ %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 3
+ store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_insertelement_v2f32:
+; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
+; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; GCN: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
define void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind {
%vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b
store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v4f32:
-; SI: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
-; SI: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
-; SI: buffer_store_dwordx4 {{v\[}}[[LOW_RESULT_REG]]:
+; GCN-LABEL: {{^}}dynamic_insertelement_v3f32:
+; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
+; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; GCN-DAG: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
+; GCN-DAG: buffer_store_dword v
+define void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %a, i32 %b) nounwind {
+ %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 %b
+ store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_insertelement_v4f32:
+; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000
+; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; GCN: buffer_store_dwordx4 {{v\[}}[[LOW_RESULT_REG]]:
define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind {
%vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b
store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v8f32:
-; SI: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v8f32:
+; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind {
%vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b
store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v16f32:
-; SI: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v16f32:
+; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind {
%vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b
store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v2i32:
-; SI: buffer_store_dwordx2
+; GCN-LABEL: {{^}}dynamic_insertelement_v2i32:
+; GCN: v_movreld_b32
+; GCN: buffer_store_dwordx2
define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i32> %a, i32 5, i32 %b
store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v4i32:
-; SI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v3i32:
+; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 5
+; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]]
+; GCN-DAG: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]:
+; GCN-DAG: buffer_store_dword v
+define void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, i32 %b) nounwind {
+ %vecins = insertelement <3 x i32> %a, i32 5, i32 %b
+ store <3 x i32> %vecins, <3 x i32> addrspace(1)* %out, align 16
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_insertelement_v4i32:
+; GCN: v_movreld_b32
+; GCN: buffer_store_dwordx4
define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i32> %a, i32 5, i32 %b
store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v8i32:
-; FIXMESI: buffer_store_dwordx4
-; FIXMESI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v8i32:
+; GCN: v_movreld_b32
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <8 x i32> %a, i32 5, i32 %b
store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v16i32:
-; FIXMESI: buffer_store_dwordx4
-; FIXMESI: buffer_store_dwordx4
-; FIXMESI: buffer_store_dwordx4
-; FIXMESI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v16i32:
+; GCN: v_movreld_b32
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
define void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind {
%vecins = insertelement <16 x i32> %a, i32 5, i32 %b
store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64
ret void
}
-
-; SI-LABEL: {{^}}dynamic_insertelement_v2i16:
-; FIXMESI: buffer_store_dwordx2
+; GCN-LABEL: {{^}}dynamic_insertelement_v2i16:
define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i16> %a, i16 5, i32 %b
store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v4i16:
-; FIXMESI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v3i16:
+define void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, i32 %b) nounwind {
+ %vecins = insertelement <3 x i16> %a, i16 5, i32 %b
+ store <3 x i16> %vecins, <3 x i16> addrspace(1)* %out, align 8
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_insertelement_v4i16:
+; GCN: buffer_load_ushort v{{[0-9]+}}, off
+; GCN: buffer_load_ushort v{{[0-9]+}}, off
+; GCN: buffer_load_ushort v{{[0-9]+}}, off
+; GCN: buffer_load_ushort v{{[0-9]+}}, off
+
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:6
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:4
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+; GCN: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+; GCN: buffer_load_ushort
+
+; GCN: buffer_store_short v{{[0-9]+}}, off
+; GCN: buffer_store_short v{{[0-9]+}}, off
+; GCN: buffer_store_short v{{[0-9]+}}, off
+; GCN: buffer_store_short v{{[0-9]+}}, off
define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i16> %a, i16 5, i32 %b
- store <4 x i16> %vecins, <4 x i16> addrspace(1)* %out, align 16
+ store <4 x i16> %vecins, <4 x i16> addrspace(1)* %out, align 8
ret void
}
+; GCN-LABEL: {{^}}dynamic_insertelement_v2i8:
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:1
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
-; SI-LABEL: {{^}}dynamic_insertelement_v2i8:
-; FIXMESI: BUFFER_STORE_USHORT
+; GCN: buffer_store_byte v{{[0-9]+}}, off
+; GCN: buffer_store_byte v{{[0-9]+}}, off
define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i8> %a, i8 5, i32 %b
store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v4i8:
-; FIXMESI: buffer_store_dword
+; GCN-LABEL: {{^}}dynamic_insertelement_v3i8:
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:1
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off
+; GCN-DAG: buffer_store_short v{{[0-9]+}}, off
+define void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> %a, i32 %b) nounwind {
+ %vecins = insertelement <3 x i8> %a, i8 5, i32 %b
+ store <3 x i8> %vecins, <3 x i8> addrspace(1)* %out, align 4
+ ret void
+}
+
+; GCN-LABEL: {{^}}dynamic_insertelement_v4i8:
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+; GCN: buffer_load_ubyte v{{[0-9]+}}, off
+
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:3
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:1
+; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
+
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+; GCN: buffer_load_ubyte
+
+; GCN: buffer_store_byte v{{[0-9]+}}, off
+; GCN: buffer_store_byte v{{[0-9]+}}, off
+; GCN: buffer_store_byte v{{[0-9]+}}, off
+; GCN: buffer_store_byte v{{[0-9]+}}, off
define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <4 x i8> %a, i8 5, i32 %b
- store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 16
+ store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 4
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v8i8:
-; FIXMESI: buffer_store_dwordx2
+; GCN-LABEL: {{^}}dynamic_insertelement_v8i8:
define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <8 x i8> %a, i8 5, i32 %b
- store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 16
+ store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 8
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v16i8:
-; FIXMESI: buffer_store_dwordx4
+; GCN-LABEL: {{^}}dynamic_insertelement_v16i8:
define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind {
%vecins = insertelement <16 x i8> %a, i8 5, i32 %b
store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16
@@ -179,7 +305,7 @@ define void @dynamic_insertelement_v16i8
; This test requires handling INSERT_SUBREG in SIFixSGPRCopies. Check that
; the compiler doesn't crash.
-; SI-LABEL: {{^}}insert_split_bb:
+; GCN-LABEL: {{^}}insert_split_bb:
define void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) {
entry:
%0 = insertelement <2 x i32> undef, i32 %a, i32 0
@@ -203,30 +329,30 @@ endif:
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v2f64:
-; SI: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x11|0x44}}{{$}}
-; SI-DAG: s_lshl_b32 [[SCALEDIDX:s[0-9]+]], [[IDX]], 1{{$}}
-; SI-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 0{{$}}
-
-; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-LABEL: {{^}}dynamic_insertelement_v2f64:
+; GCN: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x11|0x44}}{{$}}
+; GCN-DAG: s_lshl_b32 [[SCALEDIDX:s[0-9]+]], [[IDX]], 1{{$}}
+; GCN-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 0{{$}}
+
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
-; SI: s_mov_b32 m0, [[SCALEDIDX]]
-; SI: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]]
+; GCN: s_mov_b32 m0, [[SCALEDIDX]]
+; GCN: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]]
; Increment to next element.
; FIXME: Should be able to manipulate m0 directly instead of add and
; copy.
-; SI: s_or_b32 [[IDX1:s[0-9]+]], [[SCALEDIDX]], 1
-; SI-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0x40200000
-; SI-DAG: s_mov_b32 m0, [[IDX1]]
-; SI: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]]
+; GCN: s_or_b32 [[IDX1:s[0-9]+]], [[SCALEDIDX]], 1
+; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0x40200000
+; GCN-DAG: s_mov_b32 m0, [[IDX1]]
+; GCN: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]]
-; SI: buffer_store_dwordx4
-; SI: s_endpgm
+; GCN: buffer_store_dwordx4
+; GCN: s_endpgm
define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, i32 %b) nounwind {
%vecins = insertelement <2 x double> %a, double 8.0, i32 %b
store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16
@@ -234,44 +360,52 @@ define void @dynamic_insertelement_v2f64
}
; FIXME: Inline immediate should be folded into v_movreld_b32.
-; SI-LABEL: {{^}}dynamic_insertelement_v2i64:
+; GCN-LABEL: {{^}}dynamic_insertelement_v2i64:
-; SI-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 5{{$}}
-; SI-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0{{$}}
+; GCN-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 5{{$}}
+; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0{{$}}
-; SI-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]]
-; SI-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]]
+; GCN-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]]
+; GCN-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]]
-; SI: buffer_store_dwordx4
-; SI: s_endpgm
+; GCN: buffer_store_dwordx4
+; GCN: s_endpgm
define void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind {
%vecins = insertelement <2 x i64> %a, i64 5, i32 %b
store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8
ret void
}
+; GCN-LABEL: {{^}}dynamic_insertelement_v3i64:
+define void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %a, i32 %b) nounwind {
+ %vecins = insertelement <3 x i64> %a, i64 5, i32 %b
+ store <3 x i64> %vecins, <3 x i64> addrspace(1)* %out, align 32
+ ret void
+}
+
; FIXME: Should be able to do without stack access. The used stack
; space is also 2x what should be required.
-; SI-LABEL: {{^}}dynamic_insertelement_v4f64:
-; SI: SCRATCH_RSRC_DWORD
+; GCN-LABEL: {{^}}dynamic_insertelement_v4f64:
+; GCN: SCRATCH_RSRC_DWORD
; Stack store
-; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
+
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
; Write element
-; SI: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
; Stack reload
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
; Store result
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
-; SI: s_endpgm
-; SI: ScratchSize: 64
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: s_endpgm
+; GCN: ScratchSize: 64
define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind {
%vecins = insertelement <4 x double> %a, double 8.0, i32 %b
@@ -279,27 +413,27 @@ define void @dynamic_insertelement_v4f64
ret void
}
-; SI-LABEL: {{^}}dynamic_insertelement_v8f64:
-; SI: SCRATCH_RSRC_DWORD
+; GCN-LABEL: {{^}}dynamic_insertelement_v8f64:
+; GCN: SCRATCH_RSRC_DWORD
-; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
-; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:32{{$}}
-; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:48{{$}}
-
-; SI: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
-; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
-
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
-; SI: buffer_store_dwordx4
-; SI: s_endpgm
-; SI: ScratchSize: 128
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:32{{$}}
+; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:48{{$}}
+
+; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}}
+; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}}
+
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: buffer_store_dwordx4
+; GCN: s_endpgm
+; GCN: ScratchSize: 128
define void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) nounwind {
%vecins = insertelement <8 x double> %a, double 8.0, i32 %b
store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16