[llvm] r280535 - AMDGPU/R600: EXTRACT_VECT_ELT should only bypass BUILD_VECTOR if the vectors have the same number of elements.

Jan Vesely via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 2 13:13:20 PDT 2016


Author: jvesely
Date: Fri Sep  2 15:13:19 2016
New Revision: 280535

URL: http://llvm.org/viewvc/llvm-project?rev=280535&view=rev
Log:
AMDGPU/R600: EXTRACT_VECT_ELT should only bypass BUILD_VECTOR if the vectors have the same number of elements.

Fixes R600 piglit regressions since r280298

Differential Revision: https://reviews.llvm.org/D24174

Added:
    llvm/trunk/test/CodeGen/AMDGPU/amdgcn.bitcast.ll
      - copied, changed from r280534, llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll
    llvm/trunk/test/CodeGen/AMDGPU/r600.bitcast.ll
    llvm/trunk/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll
Removed:
    llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp?rev=280535&r1=280534&r2=280535&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/R600ISelLowering.cpp Fri Sep  2 15:13:19 2016
@@ -1820,7 +1820,9 @@ SDValue R600TargetLowering::PerformDAGCo
       }
     }
     if (Arg.getOpcode() == ISD::BITCAST &&
-        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
+        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
+        (Arg.getOperand(0).getValueType().getVectorNumElements() ==
+         Arg.getValueType().getVectorNumElements())) {
       if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
         unsigned Element = Const->getZExtValue();
         return DAG.getNode(ISD::BITCAST, DL, N->getVTList(),

Copied: llvm/trunk/test/CodeGen/AMDGPU/amdgcn.bitcast.ll (from r280534, llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll)
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/amdgcn.bitcast.ll?p2=llvm/trunk/test/CodeGen/AMDGPU/amdgcn.bitcast.ll&p1=llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll&r1=280534&r2=280535&rev=280535&view=diff
==============================================================================
    (empty)

Removed: llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll?rev=280534&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/bitcast.ll (removed)
@@ -1,109 +0,0 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-
-; This test just checks that the compiler doesn't crash.
-
-declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
-
-; FUNC-LABEL: {{^}}v32i8_to_v8i32:
-; SI: s_endpgm
-define amdgpu_ps void @v32i8_to_v8i32(<32 x i8> addrspace(2)* inreg) #0 {
-entry:
-  %1 = load <32 x i8>, <32 x i8> addrspace(2)* %0
-  %2 = bitcast <32 x i8> %1 to <8 x i32>
-  %3 = extractelement <8 x i32> %2, i32 1
-  %4 = icmp ne i32 %3, 0
-  %5 = select i1 %4, float 0.0, float 1.0
-  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %5, float %5, float %5, float %5)
-  ret void
-}
-
-; FUNC-LABEL: {{^}}i8ptr_v16i8ptr:
-; SI: s_endpgm
-define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
-entry:
-  %0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
-  %1 = load <16 x i8>, <16 x i8> addrspace(1)* %0
-  store <16 x i8> %1, <16 x i8> addrspace(1)* %out
-  ret void
-}
-
-define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
-  %load = load float, float addrspace(1)* %in, align 4
-  %bc = bitcast float %load to <2 x i16>
-  store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
-  ret void
-}
-
-define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
-  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
-  %bc = bitcast <2 x i16> %load to float
-  store float %bc, float addrspace(1)* %out, align 4
-  ret void
-}
-
-define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
-  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
-  %bc = bitcast <4 x i8> %load to i32
-  store i32 %bc, i32 addrspace(1)* %out, align 4
-  ret void
-}
-
-define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
-  %load = load i32, i32 addrspace(1)* %in, align 4
-  %bc = bitcast i32 %load to <4 x i8>
-  store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
-  ret void
-}
-
-; FUNC-LABEL: {{^}}bitcast_v2i32_to_f64:
-; SI: s_endpgm
-define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
-  %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
-  %add = add <2 x i32> %val, <i32 4, i32 9>
-  %bc = bitcast <2 x i32> %add to double
-  store double %bc, double addrspace(1)* %out, align 8
-  ret void
-}
-
-; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32:
-; SI: s_endpgm
-define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
-  %val = load double, double addrspace(1)* %in, align 8
-  %add = fadd double %val, 4.0
-  %bc = bitcast double %add to <2 x i32>
-  store <2 x i32> %bc, <2 x i32> addrspace(1)* %out, align 8
-  ret void
-}
-
-; FUNC-LABEL: {{^}}bitcast_v2i64_to_v2f64:
-define void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) {
-entry:
-  %cmp0 = icmp eq i32 %cond, 0
-  br i1 %cmp0, label %if, label %end
-
-if:
-  %cast = bitcast <2 x i64> %value to <2 x double>
-  br label %end
-
-end:
-  %phi = phi <2 x double> [zeroinitializer, %entry], [%cast, %if]
-  store <2 x double> %phi, <2 x double> addrspace(1)* %out
-  ret void
-}
-
-; FUNC-LABEL: {{^}}bitcast_v2f64_to_v2i64:
-define void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) {
-entry:
-  %cmp0 = icmp eq i32 %cond, 0
-  br i1 %cmp0, label %if, label %end
-
-if:
-  %cast = bitcast <2 x double> %value to <2 x i64>
-  br label %end
-
-end:
-  %phi = phi <2 x i64> [zeroinitializer, %entry], [%cast, %if]
-  store <2 x i64> %phi, <2 x i64> addrspace(1)* %out
-  ret void
-}

Added: llvm/trunk/test/CodeGen/AMDGPU/r600.bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/r600.bitcast.ll?rev=280535&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/r600.bitcast.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/r600.bitcast.ll Fri Sep  2 15:13:19 2016
@@ -0,0 +1,107 @@
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+; This test just checks that the compiler doesn't crash.
+
+
+; FUNC-LABEL: {{^}}i8ptr_v16i8ptr:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.XYZW]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_128 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) {
+entry:
+  %0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)*
+  %1 = load <16 x i8>, <16 x i8> addrspace(1)* %0
+  store <16 x i8> %1, <16 x i8> addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}f32_to_v2i16:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
+  %load = load float, float addrspace(1)* %in, align 4
+  %bc = bitcast float %load to <2 x i16>
+  store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v2i16_to_f32:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
+  %bc = bitcast <2 x i16> %load to float
+  store float %bc, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v4i8_to_i32:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* %in) nounwind {
+  %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4
+  %bc = bitcast <4 x i8> %load to i32
+  store i32 %bc, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}i32_to_v4i8:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
+  %load = load i32, i32 addrspace(1)* %in, align 4
+  %bc = bitcast i32 %load to <4 x i8>
+  store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v2i16_to_v4i8:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @v2i16_to_v4i8(<4 x i8> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind {
+  %load = load <2 x i16>, <2 x i16>  addrspace(1)* %in, align 4
+  %bc = bitcast <2 x i16> %load to <4 x i8>
+  store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4
+  ret void
+}
+
+; This just checks for crash in BUILD_VECTOR/EXTRACT_ELEMENT combine
+; the stack manipulation is tricky to follow
+; TODO: This should only use one load
+; FUNC-LABEL: {{^}}v4i16_extract_i8:
+; EG: MEM_RAT MSKOR {{T[0-9]+\.XW}}, [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_16
+; EG: VTX_READ_16
+; EG-DAG: BFE_UINT
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @v4i16_extract_i8(i8 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) nounwind {
+  %load = load <4 x i16>, <4 x i16>  addrspace(1)* %in, align 2
+  %bc = bitcast <4 x i16> %load to <8 x i8>
+  %element = extractelement <8 x i8> %bc, i32 5
+  store i8 %element, i8 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}bitcast_v2i32_to_f64:
+; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.XY]], [[ST_PTR:T[0-9]+\.[XYZW]]]
+; EG: VTX_READ_64 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]]
+; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z
+; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal
+define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
+  %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8
+  %bc = bitcast <2 x i32> %val to double
+  store double %bc, double addrspace(1)* %out, align 8
+  ret void
+}
+

Added: llvm/trunk/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll?rev=280535&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll Fri Sep  2 15:13:19 2016
@@ -0,0 +1,46 @@
+; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; XFAIL: *
+
+; This is the failing part of the r600 bitcast tests
+
+; TODO: enable doubles
+; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32:
+define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) {
+  %val = load double, double addrspace(1)* %in, align 8
+  %add = fadd double %val, 4.0
+  %bc = bitcast double %add to <2 x i32>
+  store <2 x i32> %bc, <2 x i32> addrspace(1)* %out, align 8
+  ret void
+}
+
+; FUNC-LABEL: {{^}}bitcast_v2i64_to_v2f64:
+define void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) {
+entry:
+  %cmp0 = icmp eq i32 %cond, 0
+  br i1 %cmp0, label %if, label %end
+
+if:
+  %cast = bitcast <2 x i64> %value to <2 x double>
+  br label %end
+
+end:
+  %phi = phi <2 x double> [zeroinitializer, %entry], [%cast, %if]
+  store <2 x double> %phi, <2 x double> addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}bitcast_v2f64_to_v2i64:
+define void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) {
+entry:
+  %cmp0 = icmp eq i32 %cond, 0
+  br i1 %cmp0, label %if, label %end
+
+if:
+  %cast = bitcast <2 x double> %value to <2 x i64>
+  br label %end
+
+end:
+  %phi = phi <2 x i64> [zeroinitializer, %entry], [%cast, %if]
+  store <2 x i64> %phi, <2 x i64> addrspace(1)* %out
+  ret void
+}




More information about the llvm-commits mailing list